FFmpeg
vp3.c
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/imgutils.h"
39 #include "libavutil/mem_internal.h"
40 
41 #include "avcodec.h"
42 #include "codec_internal.h"
43 #include "decode.h"
44 #include "get_bits.h"
45 #include "hpeldsp.h"
46 #include "jpegquanttables.h"
47 #include "mathops.h"
48 #include "thread.h"
49 #include "threadframe.h"
50 #include "videodsp.h"
51 #include "vp3data.h"
52 #include "vp4data.h"
53 #include "vp3dsp.h"
54 #include "xiph.h"
55 
56 #define VP3_MV_VLC_BITS 6
57 #define VP4_MV_VLC_BITS 6
58 #define SUPERBLOCK_VLC_BITS 6
59 
60 #define FRAGMENT_PIXELS 8
61 
62 // FIXME split things out into their own arrays
63 typedef struct Vp3Fragment {
64  int16_t dc;
65  uint8_t coding_method;
66  uint8_t qpi;
67 } Vp3Fragment;
68 
69 #define SB_NOT_CODED 0
70 #define SB_PARTIALLY_CODED 1
71 #define SB_FULLY_CODED 2
72 
73 // This is the maximum length of a single long bit run that can be encoded
74 // for superblock coding or block qps. Theora special-cases this to read a
75 // bit instead of flipping the current bit to allow for runs longer than 4129.
76 #define MAXIMUM_LONG_BIT_RUN 4129
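 
/* Illustrative sketch (added for clarity, compiled out; not part of the original
 * decoder): the bit-run convention described above, as used by the superblock
 * and block-qpi unpacking further below. Runs normally alternate between 0 and
 * 1, so only the first bit is read explicitly; Theora re-reads the bit after a
 * run of exactly MAXIMUM_LONG_BIT_RUN so that longer runs stay representable.
 * read_run() stands in for the VLC + extra-bits read done by the real code. */
#if 0
static void decode_bit_runs_sketch(GetBitContext *gb, uint8_t *flags, int count,
                                   int is_theora, int (*read_run)(GetBitContext *))
{
    int bit = get_bits1(gb) ^ 1; /* flipped back before the first run */
    int pos = 0, run = 0;

    while (pos < count && get_bits_left(gb) > 0) {
        if (is_theora && run == MAXIMUM_LONG_BIT_RUN)
            bit = get_bits1(gb); /* long-run special case: re-read the bit */
        else
            bit ^= 1;            /* normal case: runs alternate */
        run = read_run(gb);
        run = FFMIN(run, count - pos);
        memset(flags + pos, bit, run);
        pos += run;
    }
}
#endif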
77 
78 #define MODE_INTER_NO_MV 0
79 #define MODE_INTRA 1
80 #define MODE_INTER_PLUS_MV 2
81 #define MODE_INTER_LAST_MV 3
82 #define MODE_INTER_PRIOR_LAST 4
83 #define MODE_USING_GOLDEN 5
84 #define MODE_GOLDEN_MV 6
85 #define MODE_INTER_FOURMV 7
86 #define CODING_MODE_COUNT 8
87 
88 /* special internal mode */
89 #define MODE_COPY 8
90 
91 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
92 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
93 
94 
95 /* There are 6 preset schemes, plus a free-form scheme */
96 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
97  /* scheme 1: Last motion vector dominates */
102 
103  /* scheme 2 */
108 
109  /* scheme 3 */
114 
115  /* scheme 4 */
120 
121  /* scheme 5: No motion vector dominates */
126 
127  /* scheme 6 */
132 };
133 
134 static const uint8_t hilbert_offset[16][2] = {
135  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
136  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
137  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
138  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
139 };
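 
/* Illustrative diagram (added for clarity): the visiting order of the 16
 * fragments inside a 4x4 superblock, as given by hilbert_offset[] above.
 * The number at position (x, y) is the index i at which that fragment is
 * visited:
 *
 *      x:  0   1   2   3
 *   y=0:   0   1  14  15
 *   y=1:   3   2  13  12
 *   y=2:   4   7   8  11
 *   y=3:   5   6   9  10
 */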
140 
141 enum {
147 };
148 
149 static const uint8_t vp4_pred_block_type_map[8] = {
158 };
159 
160 typedef struct {
161  int dc;
162  int type;
163 } VP4Predictor;
164 
165 #define MIN_DEQUANT_VAL 2
166 
167 typedef struct HuffEntry {
168  uint8_t len, sym;
169 } HuffEntry;
170 
171 typedef struct HuffTable {
173  uint8_t nb_entries;
174 } HuffTable;
175 
176 typedef struct Vp3DecodeContext {
179  int version;
180  int width, height;
185  int keyframe;
186  uint8_t idct_permutation[64];
187  uint8_t idct_scantable[64];
191  DECLARE_ALIGNED(16, int16_t, block)[64];
195 
196  int qps[3];
197  int nqps;
198  int last_qps[3];
199 
209  unsigned char *superblock_coding;
210 
211  int macroblock_count; /* y macroblock count */
217  int yuv_macroblock_count; /* y+u+v macroblock count */
218 
222 
225  int data_offset[3];
226  uint8_t offset_x;
227  uint8_t offset_y;
229 
230  int8_t (*motion_val[2])[2];
231 
232  /* tables */
233  uint16_t coded_dc_scale_factor[2][64];
234  uint32_t coded_ac_scale_factor[64];
235  uint8_t base_matrix[384][64];
236  uint8_t qr_count[2][3];
237  uint8_t qr_size[2][3][64];
238  uint16_t qr_base[2][3][64];
239 
240  /**
241  * This is a list of all tokens in bitstream order. Reordering takes place
242  * by pulling from each level during IDCT. As a consequence, IDCT must be
243  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
244  * otherwise. The 32 different tokens with up to 12 bits of extradata are
245  * collapsed into 3 types, packed as follows:
246  * (from the low to high bits)
247  *
248  * 2 bits: type (0,1,2)
249  * 0: EOB run, 14 bits for run length (12 needed)
250  * 1: zero run, 7 bits for run length
251  * 7 bits for the next coefficient (3 needed)
252  * 2: coefficient, 14 bits (11 needed)
253  *
254  * Coefficients are signed, so are packed in the highest bits for automatic
255  * sign extension.
256  */
257  int16_t *dct_tokens[3][64];
258  int16_t *dct_tokens_base;
259 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
260 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
261 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
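 
/* Worked example (added for clarity): TOKEN_ZERO_RUN(-3, 5) packs to
 * (-3) * 512 + (5 << 2) + 1 = -1515 (0xFA15 as an int16_t). Reading it back
 * the way vp3_dequant() does: type = token & 3 = 1 (zero run),
 * zero_run = (token >> 2) & 0x7f = 5, coeff = token >> 9 = -3; the arithmetic
 * shift recovers the sign because the coefficient sits in the high bits. */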
262 
263  /**
264  * number of blocks that contain DCT coefficients at
265  * the given level or higher
266  */
267  int num_coded_frags[3][64];
269 
270  /* this is a list of indexes into the all_fragments array indicating
271  * which of the fragments are coded */
273 
277 
278  /* The first 16 of the following VLCs are for the dc coefficients;
279  the others are four groups of 16 VLCs each for ac coefficients. */
280  VLC coeff_vlc[5 * 16];
281 
282  VLC superblock_run_length_vlc; /* version < 2 */
283  VLC fragment_run_length_vlc; /* version < 2 */
284  VLC block_pattern_vlc[2]; /* version >= 2 */
286  VLC motion_vector_vlc; /* version < 2 */
287  VLC vp4_mv_vlc[2][7]; /* version >=2 */
288 
289  /* these arrays need to be on 16-byte boundaries since SSE2 operations
290  * index into them */
291  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
292 
293  /* This table contains superblock_count * 16 entries. Each set of 16
294  * numbers corresponds to the fragment indexes 0..15 of the superblock.
295  * An entry will be -1 to indicate that no entry corresponds to that
296  * index. */
298 
299  /* This is an array that indicates how a particular macroblock
300  * is coded. */
301  unsigned char *macroblock_coding;
302 
303  uint8_t *edge_emu_buffer;
304 
305  /* Huffman decode */
307 
308  uint8_t filter_limit_values[64];
310 
311  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
312 } Vp3DecodeContext;
313 
314 /************************************************************************
315  * VP3 specific functions
316  ************************************************************************/
317 
318 static av_cold void free_tables(AVCodecContext *avctx)
319 {
320  Vp3DecodeContext *s = avctx->priv_data;
321 
322  av_freep(&s->superblock_coding);
323  av_freep(&s->all_fragments);
324  av_freep(&s->nkf_coded_fragment_list);
325  av_freep(&s->kf_coded_fragment_list);
326  av_freep(&s->dct_tokens_base);
327  av_freep(&s->superblock_fragments);
328  av_freep(&s->macroblock_coding);
329  av_freep(&s->dc_pred_row);
330  av_freep(&s->motion_val[0]);
331  av_freep(&s->motion_val[1]);
332 }
333 
334 static void vp3_decode_flush(AVCodecContext *avctx)
335 {
336  Vp3DecodeContext *s = avctx->priv_data;
337 
338  if (s->golden_frame.f)
339  ff_thread_release_ext_buffer(avctx, &s->golden_frame);
340  if (s->last_frame.f)
341  ff_thread_release_ext_buffer(avctx, &s->last_frame);
342  if (s->current_frame.f)
343  ff_thread_release_ext_buffer(avctx, &s->current_frame);
344 }
345 
346 static av_cold int vp3_decode_end(AVCodecContext *avctx)
347 {
348  Vp3DecodeContext *s = avctx->priv_data;
349  int i, j;
350 
351  free_tables(avctx);
352  av_freep(&s->edge_emu_buffer);
353 
354  s->theora_tables = 0;
355 
356  /* release all frames */
357  vp3_decode_flush(avctx);
358  av_frame_free(&s->current_frame.f);
359  av_frame_free(&s->last_frame.f);
360  av_frame_free(&s->golden_frame.f);
361 
362  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++)
363  ff_free_vlc(&s->coeff_vlc[i]);
364 
365  ff_free_vlc(&s->superblock_run_length_vlc);
366  ff_free_vlc(&s->fragment_run_length_vlc);
367  ff_free_vlc(&s->mode_code_vlc);
368  ff_free_vlc(&s->motion_vector_vlc);
369 
370  for (j = 0; j < 2; j++)
371  for (i = 0; i < 7; i++)
372  ff_free_vlc(&s->vp4_mv_vlc[j][i]);
373 
374  for (i = 0; i < 2; i++)
375  ff_free_vlc(&s->block_pattern_vlc[i]);
376  return 0;
377 }
378 
379 /**
380  * This function sets up all of the various block mappings:
381  * superblocks <-> fragments, macroblocks <-> fragments,
382  * superblocks <-> macroblocks
383  *
384  * @return 0 if successful; 1 if *anything* went wrong.
385  */
386 static int init_block_mapping(Vp3DecodeContext *s)
387 {
388  int sb_x, sb_y, plane;
389  int x, y, i, j = 0;
390 
391  for (plane = 0; plane < 3; plane++) {
392  int sb_width = plane ? s->c_superblock_width
393  : s->y_superblock_width;
394  int sb_height = plane ? s->c_superblock_height
395  : s->y_superblock_height;
396  int frag_width = s->fragment_width[!!plane];
397  int frag_height = s->fragment_height[!!plane];
398 
399  for (sb_y = 0; sb_y < sb_height; sb_y++)
400  for (sb_x = 0; sb_x < sb_width; sb_x++)
401  for (i = 0; i < 16; i++) {
402  x = 4 * sb_x + hilbert_offset[i][0];
403  y = 4 * sb_y + hilbert_offset[i][1];
404 
405  if (x < frag_width && y < frag_height)
406  s->superblock_fragments[j++] = s->fragment_start[plane] +
407  y * frag_width + x;
408  else
409  s->superblock_fragments[j++] = -1;
410  }
411  }
412 
413  return 0; /* successful path out */
414 }
415 
416 /*
417  * This function sets up the dequantization tables used for a particular
418  * frame.
419  */
420 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
421 {
422  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
423  int i, plane, inter, qri, bmi, bmj, qistart;
424 
425  for (inter = 0; inter < 2; inter++) {
426  for (plane = 0; plane < 3; plane++) {
427  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
428  int sum = 0;
429  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
430  sum += s->qr_size[inter][plane][qri];
431  if (s->qps[qpi] <= sum)
432  break;
433  }
434  qistart = sum - s->qr_size[inter][plane][qri];
435  bmi = s->qr_base[inter][plane][qri];
436  bmj = s->qr_base[inter][plane][qri + 1];
437  for (i = 0; i < 64; i++) {
438  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
439  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
440  s->qr_size[inter][plane][qri]) /
441  (2 * s->qr_size[inter][plane][qri]);
442 
443  int qmin = 8 << (inter + !i);
444  int qscale = i ? ac_scale_factor : dc_scale_factor;
445  int qbias = (1 + inter) * 3;
446  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
447  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
448  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
449  }
450  /* all DC coefficients use the same quant so as not to interfere
451  * with DC prediction */
452  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
453  }
454  }
455 }
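 
/* Editorial note (added for clarity): the qri loop above selects the pair of
 * base matrices that bracket the requested quality index, and the coeff
 * expression linearly interpolates between them. With qistart <= qi <= sum
 * and qr_size = sum - qistart, it computes
 *
 *   coeff = round(((sum - qi) * base[bmi][i] + (qi - qistart) * base[bmj][i])
 *                 / qr_size)
 *
 * which is exactly the (2*... + qr_size) / (2*qr_size) form used in the code. */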
456 
457 /*
458  * This function initializes the loop filter boundary limits if the frame's
459  * quality index is different from the previous frame's.
460  *
461  * The filter_limit_values may not be larger than 127.
462  */
463 static void init_loop_filter(Vp3DecodeContext *s)
464 {
465  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
466 }
467 
468 /*
469  * This function unpacks all of the superblock/macroblock/fragment coding
470  * information from the bitstream.
471  */
472 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
473 {
474  int superblock_starts[3] = {
475  0, s->u_superblock_start, s->v_superblock_start
476  };
477  int bit = 0;
478  int current_superblock = 0;
479  int current_run = 0;
480  int num_partial_superblocks = 0;
481 
482  int i, j;
483  int current_fragment;
484  int plane;
485  int plane0_num_coded_frags = 0;
486 
487  if (s->keyframe) {
488  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
489  } else {
490  /* unpack the list of partially-coded superblocks */
491  bit = get_bits1(gb) ^ 1;
492  current_run = 0;
493 
494  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
495  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
496  bit = get_bits1(gb);
497  else
498  bit ^= 1;
499 
500  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
501  SUPERBLOCK_VLC_BITS, 2);
502  if (current_run == 34)
503  current_run += get_bits(gb, 12);
504 
505  if (current_run > s->superblock_count - current_superblock) {
506  av_log(s->avctx, AV_LOG_ERROR,
507  "Invalid partially coded superblock run length\n");
508  return -1;
509  }
510 
511  memset(s->superblock_coding + current_superblock, bit, current_run);
512 
513  current_superblock += current_run;
514  if (bit)
515  num_partial_superblocks += current_run;
516  }
517 
518  /* unpack the list of fully coded superblocks if any of the blocks were
519  * not marked as partially coded in the previous step */
520  if (num_partial_superblocks < s->superblock_count) {
521  int superblocks_decoded = 0;
522 
523  current_superblock = 0;
524  bit = get_bits1(gb) ^ 1;
525  current_run = 0;
526 
527  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
528  get_bits_left(gb) > 0) {
529  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
530  bit = get_bits1(gb);
531  else
532  bit ^= 1;
533 
534  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
535  SUPERBLOCK_VLC_BITS, 2);
536  if (current_run == 34)
537  current_run += get_bits(gb, 12);
538 
539  for (j = 0; j < current_run; current_superblock++) {
540  if (current_superblock >= s->superblock_count) {
541  av_log(s->avctx, AV_LOG_ERROR,
542  "Invalid fully coded superblock run length\n");
543  return -1;
544  }
545 
546  /* skip any superblocks already marked as partially coded */
547  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
548  s->superblock_coding[current_superblock] = 2 * bit;
549  j++;
550  }
551  }
552  superblocks_decoded += current_run;
553  }
554  }
555 
556  /* if there were partial blocks, initialize bitstream for
557  * unpacking fragment codings */
558  if (num_partial_superblocks) {
559  current_run = 0;
560  bit = get_bits1(gb);
561  /* toggle the bit because as soon as the first run length is
562  * fetched the bit will be toggled again */
563  bit ^= 1;
564  }
565  }
566 
567  /* figure out which fragments are coded; iterate through each
568  * superblock (all planes) */
569  s->total_num_coded_frags = 0;
570  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
571 
572  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
573  : s->nkf_coded_fragment_list;
574 
575  for (plane = 0; plane < 3; plane++) {
576  int sb_start = superblock_starts[plane];
577  int sb_end = sb_start + (plane ? s->c_superblock_count
578  : s->y_superblock_count);
579  int num_coded_frags = 0;
580 
581  if (s->keyframe) {
582  if (s->num_kf_coded_fragment[plane] == -1) {
583  for (i = sb_start; i < sb_end; i++) {
584  /* iterate through all 16 fragments in a superblock */
585  for (j = 0; j < 16; j++) {
586  /* if the fragment is in bounds, check its coding status */
587  current_fragment = s->superblock_fragments[i * 16 + j];
588  if (current_fragment != -1) {
589  s->coded_fragment_list[plane][num_coded_frags++] =
590  current_fragment;
591  }
592  }
593  }
594  s->num_kf_coded_fragment[plane] = num_coded_frags;
595  } else
596  num_coded_frags = s->num_kf_coded_fragment[plane];
597  } else {
598  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
599  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
600  return AVERROR_INVALIDDATA;
601  }
602  /* iterate through all 16 fragments in a superblock */
603  for (j = 0; j < 16; j++) {
604  /* if the fragment is in bounds, check its coding status */
605  current_fragment = s->superblock_fragments[i * 16 + j];
606  if (current_fragment != -1) {
607  int coded = s->superblock_coding[i];
608 
609  if (coded == SB_PARTIALLY_CODED) {
610  /* fragment may or may not be coded; this is the case
611  * that cares about the fragment coding runs */
612  if (current_run-- == 0) {
613  bit ^= 1;
614  current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
615  }
616  coded = bit;
617  }
618 
619  if (coded) {
620  /* default mode; actual mode will be decoded in
621  * the next phase */
622  s->all_fragments[current_fragment].coding_method =
623  MODE_INTER_NO_MV;
624  s->coded_fragment_list[plane][num_coded_frags++] =
625  current_fragment;
626  } else {
627  /* not coded; copy this fragment from the prior frame */
628  s->all_fragments[current_fragment].coding_method =
629  MODE_COPY;
630  }
631  }
632  }
633  }
634  }
635  if (!plane)
636  plane0_num_coded_frags = num_coded_frags;
637  s->total_num_coded_frags += num_coded_frags;
638  for (i = 0; i < 64; i++)
639  s->num_coded_frags[plane][i] = num_coded_frags;
640  if (plane < 2)
641  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
642  num_coded_frags;
643  }
644  return 0;
645 }
646 
647 #define BLOCK_X (2 * mb_x + (k & 1))
648 #define BLOCK_Y (2 * mb_y + (k >> 1))
649 
650 #if CONFIG_VP4_DECODER
651 /**
652  * @return number of blocks, or > yuv_macroblock_count on error.
653  * The return value is always >= 1.
654  */
655 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
656 {
657  int v = 1;
658  int bits;
659  while ((bits = show_bits(gb, 9)) == 0x1ff) {
660  skip_bits(gb, 9);
661  v += 256;
662  if (v > s->yuv_macroblock_count) {
663  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
664  return v;
665  }
666  }
667 #define body(n) { \
668  skip_bits(gb, 2 + n); \
669  v += (1 << n) + get_bits(gb, n); }
670 #define thresh(n) (0x200 - (0x80 >> n))
671 #define else_if(n) else if (bits < thresh(n)) body(n)
672  if (bits < 0x100) {
673  skip_bits(gb, 1);
674  } else if (bits < thresh(0)) {
675  skip_bits(gb, 2);
676  v += 1;
677  }
678  else_if(1)
679  else_if(2)
680  else_if(3)
681  else_if(4)
682  else_if(5)
683  else_if(6)
684  else body(7)
685 #undef body
686 #undef thresh
687 #undef else_if
688  return v;
689 }
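 
/* Editorial note (added for clarity): the macros above implement a
 * unary-prefix code. Relative to the running count v, the increment k is
 * coded as
 *
 *   "0"                          -> k = 0
 *   "10"                         -> k = 1
 *   (n+1) ones, "0", n bits x    -> k = (1 << n) + x    (n = 1..7)
 *
 * and each 0x1ff escape consumed by the while loop adds a further 256.
 * Since v starts at 1, the decoded macroblock count is always >= 1. */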
690 
691 static int vp4_get_block_pattern(Vp3DecodeContext *s, GetBitContext *gb, int *next_block_pattern_table)
692 {
693  int v = get_vlc2(gb, s->block_pattern_vlc[*next_block_pattern_table].table, 3, 2);
694  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
695  return v + 1;
696 }
697 
698 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
699 {
700  int plane, i, j, k, fragment;
701  int next_block_pattern_table;
702  int bit, current_run, has_partial;
703 
704  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
705 
706  if (s->keyframe)
707  return 0;
708 
709  has_partial = 0;
710  bit = get_bits1(gb);
711  for (i = 0; i < s->yuv_macroblock_count; i += current_run) {
712  if (get_bits_left(gb) <= 0)
713  return AVERROR_INVALIDDATA;
714  current_run = vp4_get_mb_count(s, gb);
715  if (current_run > s->yuv_macroblock_count - i)
716  return -1;
717  memset(s->superblock_coding + i, 2 * bit, current_run);
718  bit ^= 1;
719  has_partial |= bit;
720  }
721 
722  if (has_partial) {
723  if (get_bits_left(gb) <= 0)
724  return AVERROR_INVALIDDATA;
725  bit = get_bits1(gb);
726  current_run = vp4_get_mb_count(s, gb);
727  for (i = 0; i < s->yuv_macroblock_count; i++) {
728  if (!s->superblock_coding[i]) {
729  if (!current_run) {
730  bit ^= 1;
731  current_run = vp4_get_mb_count(s, gb);
732  }
733  s->superblock_coding[i] = bit;
734  current_run--;
735  }
736  }
737  if (current_run) /* handle situation when vp4_get_mb_count() fails */
738  return -1;
739  }
740 
741  next_block_pattern_table = 0;
742  i = 0;
743  for (plane = 0; plane < 3; plane++) {
744  int sb_x, sb_y;
745  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
746  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
747  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
748  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
749  int fragment_width = s->fragment_width[!!plane];
750  int fragment_height = s->fragment_height[!!plane];
751 
752  for (sb_y = 0; sb_y < sb_height; sb_y++) {
753  for (sb_x = 0; sb_x < sb_width; sb_x++) {
754  for (j = 0; j < 4; j++) {
755  int mb_x = 2 * sb_x + (j >> 1);
756  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
757  int mb_coded, pattern, coded;
758 
759  if (mb_x >= mb_width || mb_y >= mb_height)
760  continue;
761 
762  mb_coded = s->superblock_coding[i++];
763 
764  if (mb_coded == SB_FULLY_CODED)
765  pattern = 0xF;
766  else if (mb_coded == SB_PARTIALLY_CODED)
767  pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
768  else
769  pattern = 0;
770 
771  for (k = 0; k < 4; k++) {
772  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
773  continue;
774  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
775  coded = pattern & (8 >> k);
776  /* MODE_INTER_NO_MV is the default for coded fragments.
777  The actual method is decoded in the next phase. */
778  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
779  }
780  }
781  }
782  }
783  }
784  return 0;
785 }
786 #endif
787 
788 /*
789  * This function unpacks all the coding mode data for individual macroblocks
790  * from the bitstream.
791  */
792 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
793 {
794  int i, j, k, sb_x, sb_y;
795  int scheme;
796  int current_macroblock;
797  int current_fragment;
798  int coding_mode;
799  int custom_mode_alphabet[CODING_MODE_COUNT];
800  const int *alphabet;
801  Vp3Fragment *frag;
802 
803  if (s->keyframe) {
804  for (i = 0; i < s->fragment_count; i++)
805  s->all_fragments[i].coding_method = MODE_INTRA;
806  } else {
807  /* fetch the mode coding scheme for this frame */
808  scheme = get_bits(gb, 3);
809 
810  /* is it a custom coding scheme? */
811  if (scheme == 0) {
812  for (i = 0; i < 8; i++)
813  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
814  for (i = 0; i < 8; i++)
815  custom_mode_alphabet[get_bits(gb, 3)] = i;
816  alphabet = custom_mode_alphabet;
817  } else
818  alphabet = ModeAlphabet[scheme - 1];
819 
820  /* iterate through all of the macroblocks that contain 1 or more
821  * coded fragments */
822  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
823  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
824  if (get_bits_left(gb) <= 0)
825  return -1;
826 
827  for (j = 0; j < 4; j++) {
828  int mb_x = 2 * sb_x + (j >> 1);
829  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
830  current_macroblock = mb_y * s->macroblock_width + mb_x;
831 
832  if (mb_x >= s->macroblock_width ||
833  mb_y >= s->macroblock_height)
834  continue;
835 
836  /* coding modes are only stored if the macroblock has
837  * at least one luma block coded, otherwise it must be
838  * INTER_NO_MV */
839  for (k = 0; k < 4; k++) {
840  current_fragment = BLOCK_Y *
841  s->fragment_width[0] + BLOCK_X;
842  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
843  break;
844  }
845  if (k == 4) {
846  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
847  continue;
848  }
849 
850  /* mode 7 means get 3 bits for each coding mode */
851  if (scheme == 7)
852  coding_mode = get_bits(gb, 3);
853  else
854  coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
855 
856  s->macroblock_coding[current_macroblock] = coding_mode;
857  for (k = 0; k < 4; k++) {
858  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
859  if (frag->coding_method != MODE_COPY)
860  frag->coding_method = coding_mode;
861  }
862 
863 #define SET_CHROMA_MODES \
864  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
865  frag[s->fragment_start[1]].coding_method = coding_mode; \
866  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
867  frag[s->fragment_start[2]].coding_method = coding_mode;
868 
869  if (s->chroma_y_shift) {
870  frag = s->all_fragments + mb_y *
871  s->fragment_width[1] + mb_x;
872  SET_CHROMA_MODES
873  } else if (s->chroma_x_shift) {
874  frag = s->all_fragments +
875  2 * mb_y * s->fragment_width[1] + mb_x;
876  for (k = 0; k < 2; k++) {
877  SET_CHROMA_MODES
878  frag += s->fragment_width[1];
879  }
880  } else {
881  for (k = 0; k < 4; k++) {
882  frag = s->all_fragments +
883  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
884  SET_CHROMA_MODES
885  }
886  }
887  }
888  }
889  }
890  }
891 
892  return 0;
893 }
894 
895 static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
896 {
897  int v = get_vlc2(gb, s->vp4_mv_vlc[axis][vp4_mv_table_selector[FFABS(last_motion)]].table,
898  VP4_MV_VLC_BITS, 2);
899  return last_motion < 0 ? -v : v;
900 }
901 
902 /*
903  * This function unpacks all the motion vectors for the individual
904  * macroblocks from the bitstream.
905  */
906 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
907 {
908  int j, k, sb_x, sb_y;
909  int coding_mode;
910  int motion_x[4];
911  int motion_y[4];
912  int last_motion_x = 0;
913  int last_motion_y = 0;
914  int prior_last_motion_x = 0;
915  int prior_last_motion_y = 0;
916  int last_gold_motion_x = 0;
917  int last_gold_motion_y = 0;
918  int current_macroblock;
919  int current_fragment;
920  int frag;
921 
922  if (s->keyframe)
923  return 0;
924 
925  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is the VP4 code scheme */
926  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
927 
928  /* iterate through all of the macroblocks that contain 1 or more
929  * coded fragments */
930  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
931  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
932  if (get_bits_left(gb) <= 0)
933  return -1;
934 
935  for (j = 0; j < 4; j++) {
936  int mb_x = 2 * sb_x + (j >> 1);
937  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
938  current_macroblock = mb_y * s->macroblock_width + mb_x;
939 
940  if (mb_x >= s->macroblock_width ||
941  mb_y >= s->macroblock_height ||
942  s->macroblock_coding[current_macroblock] == MODE_COPY)
943  continue;
944 
945  switch (s->macroblock_coding[current_macroblock]) {
946  case MODE_GOLDEN_MV:
947  if (coding_mode == 2) { /* VP4 */
948  last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
949  last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
950  break;
951  } /* otherwise fall through */
952  case MODE_INTER_PLUS_MV:
953  /* all 6 fragments use the same motion vector */
954  if (coding_mode == 0) {
955  motion_x[0] = get_vlc2(gb, s->motion_vector_vlc.table,
956  VP3_MV_VLC_BITS, 2);
957  motion_y[0] = get_vlc2(gb, s->motion_vector_vlc.table,
958  VP3_MV_VLC_BITS, 2);
959  } else if (coding_mode == 1) {
960  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
961  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
962  } else { /* VP4 */
963  motion_x[0] = vp4_get_mv(s, gb, 0, last_motion_x);
964  motion_y[0] = vp4_get_mv(s, gb, 1, last_motion_y);
965  }
966 
967  /* vector maintenance, only on MODE_INTER_PLUS_MV */
968  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
969  prior_last_motion_x = last_motion_x;
970  prior_last_motion_y = last_motion_y;
971  last_motion_x = motion_x[0];
972  last_motion_y = motion_y[0];
973  }
974  break;
975 
976  case MODE_INTER_FOURMV:
977  /* vector maintenance */
978  prior_last_motion_x = last_motion_x;
979  prior_last_motion_y = last_motion_y;
980 
981  /* fetch 4 vectors from the bitstream, one for each
982  * Y fragment, then average for the C fragment vectors */
983  for (k = 0; k < 4; k++) {
984  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
985  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
986  if (coding_mode == 0) {
987  motion_x[k] = get_vlc2(gb, s->motion_vector_vlc.table,
988  VP3_MV_VLC_BITS, 2);
989  motion_y[k] = get_vlc2(gb, s->motion_vector_vlc.table,
990  VP3_MV_VLC_BITS, 2);
991  } else if (coding_mode == 1) {
992  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
993  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
994  } else { /* VP4 */
995  motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
996  motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
997  }
998  last_motion_x = motion_x[k];
999  last_motion_y = motion_y[k];
1000  } else {
1001  motion_x[k] = 0;
1002  motion_y[k] = 0;
1003  }
1004  }
1005  break;
1006 
1007  case MODE_INTER_LAST_MV:
1008  /* all 6 fragments use the last motion vector */
1009  motion_x[0] = last_motion_x;
1010  motion_y[0] = last_motion_y;
1011 
1012  /* no vector maintenance (last vector remains the
1013  * last vector) */
1014  break;
1015 
1016  case MODE_INTER_PRIOR_LAST:
1017  /* all 6 fragments use the motion vector prior to the
1018  * last motion vector */
1019  motion_x[0] = prior_last_motion_x;
1020  motion_y[0] = prior_last_motion_y;
1021 
1022  /* vector maintenance */
1023  prior_last_motion_x = last_motion_x;
1024  prior_last_motion_y = last_motion_y;
1025  last_motion_x = motion_x[0];
1026  last_motion_y = motion_y[0];
1027  break;
1028 
1029  default:
1030  /* covers intra, inter without MV, golden without MV */
1031  motion_x[0] = 0;
1032  motion_y[0] = 0;
1033 
1034  /* no vector maintenance */
1035  break;
1036  }
1037 
1038  /* assign the motion vectors to the correct fragments */
1039  for (k = 0; k < 4; k++) {
1040  current_fragment =
1041  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1042  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1043  s->motion_val[0][current_fragment][0] = motion_x[k];
1044  s->motion_val[0][current_fragment][1] = motion_y[k];
1045  } else {
1046  s->motion_val[0][current_fragment][0] = motion_x[0];
1047  s->motion_val[0][current_fragment][1] = motion_y[0];
1048  }
1049  }
1050 
1051  if (s->chroma_y_shift) {
1052  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1053  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1054  motion_x[2] + motion_x[3], 2);
1055  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1056  motion_y[2] + motion_y[3], 2);
1057  }
1058  if (s->version <= 2) {
1059  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1060  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1061  }
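                    /* Editorial note (added for clarity): (m >> 1) | (m & 1)
                     * above halves a luma vector while keeping its half-pel
                     * flag set, e.g. 4 -> 2, 5 -> 3, -5 -> -3 (arithmetic
                     * shift, then OR with the original low bit). */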
1062  frag = mb_y * s->fragment_width[1] + mb_x;
1063  s->motion_val[1][frag][0] = motion_x[0];
1064  s->motion_val[1][frag][1] = motion_y[0];
1065  } else if (s->chroma_x_shift) {
1066  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1067  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1068  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1069  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1070  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1071  } else {
1072  motion_x[1] = motion_x[0];
1073  motion_y[1] = motion_y[0];
1074  }
1075  if (s->version <= 2) {
1076  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1077  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1078  }
1079  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1080  for (k = 0; k < 2; k++) {
1081  s->motion_val[1][frag][0] = motion_x[k];
1082  s->motion_val[1][frag][1] = motion_y[k];
1083  frag += s->fragment_width[1];
1084  }
1085  } else {
1086  for (k = 0; k < 4; k++) {
1087  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1088  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1089  s->motion_val[1][frag][0] = motion_x[k];
1090  s->motion_val[1][frag][1] = motion_y[k];
1091  } else {
1092  s->motion_val[1][frag][0] = motion_x[0];
1093  s->motion_val[1][frag][1] = motion_y[0];
1094  }
1095  }
1096  }
1097  }
1098  }
1099  }
1100 
1101  return 0;
1102 }
1103 
1104 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
1105 {
1106  int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
1107  int num_blocks = s->total_num_coded_frags;
1108 
1109  for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1110  i = blocks_decoded = num_blocks_at_qpi = 0;
1111 
1112  bit = get_bits1(gb) ^ 1;
1113  run_length = 0;
1114 
1115  do {
1116  if (run_length == MAXIMUM_LONG_BIT_RUN)
1117  bit = get_bits1(gb);
1118  else
1119  bit ^= 1;
1120 
1121  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table,
1122  SUPERBLOCK_VLC_BITS, 2);
1123  if (run_length == 34)
1124  run_length += get_bits(gb, 12);
1125  blocks_decoded += run_length;
1126 
1127  if (!bit)
1128  num_blocks_at_qpi += run_length;
1129 
1130  for (j = 0; j < run_length; i++) {
1131  if (i >= s->total_num_coded_frags)
1132  return -1;
1133 
1134  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1135  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1136  j++;
1137  }
1138  }
1139  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1140 
1141  num_blocks -= num_blocks_at_qpi;
1142  }
1143 
1144  return 0;
1145 }
1146 
1147 static inline int get_eob_run(GetBitContext *gb, int token)
1148 {
1149  int v = eob_run_table[token].base;
1150  if (eob_run_table[token].bits)
1151  v += get_bits(gb, eob_run_table[token].bits);
1152  return v;
1153 }
1154 
1155 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1156 {
1157  int bits_to_get, zero_run;
1158 
1159  bits_to_get = coeff_get_bits[token];
1160  if (bits_to_get)
1161  bits_to_get = get_bits(gb, bits_to_get);
1162  *coeff = coeff_tables[token][bits_to_get];
1163 
1164  zero_run = zero_run_base[token];
1165  if (zero_run_get_bits[token])
1166  zero_run += get_bits(gb, zero_run_get_bits[token]);
1167 
1168  return zero_run;
1169 }
1170 
1171 /*
1172  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1173  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1174  * data. This function unpacks all the VLCs for either the Y plane or both
1175  * C planes, and is called for DC coefficients or different AC coefficient
1176  * levels (since different coefficient types require different VLC tables).
1177  *
1178  * This function returns a residual eob run. E.g., if a particular token gave
1179  * instructions to EOB the next 5 fragments and there were only 2 fragments
1180  * left in the current fragment range, 3 would be returned so that it could
1181  * be passed into the next call to this same function.
1182  */
1184  VLC *table, int coeff_index,
1185  int plane,
1186  int eob_run)
1187 {
1188  int i, j = 0;
1189  int token;
1190  int zero_run = 0;
1191  int16_t coeff = 0;
1192  int blocks_ended;
1193  int coeff_i = 0;
1194  int num_coeffs = s->num_coded_frags[plane][coeff_index];
1195  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1196 
1197  /* local references to structure members to avoid repeated dereferences */
1198  int *coded_fragment_list = s->coded_fragment_list[plane];
1199  Vp3Fragment *all_fragments = s->all_fragments;
1200  const VLCElem *vlc_table = table->table;
1201 
1202  if (num_coeffs < 0) {
1203  av_log(s->avctx, AV_LOG_ERROR,
1204  "Invalid number of coefficients at level %d\n", coeff_index);
1205  return AVERROR_INVALIDDATA;
1206  }
1207 
1208  if (eob_run > num_coeffs) {
1209  coeff_i =
1210  blocks_ended = num_coeffs;
1211  eob_run -= num_coeffs;
1212  } else {
1213  coeff_i =
1214  blocks_ended = eob_run;
1215  eob_run = 0;
1216  }
1217 
1218  // insert fake EOB token to cover the split between planes or zzi
1219  if (blocks_ended)
1220  dct_tokens[j++] = blocks_ended << 2;
1221 
1222  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
1223  /* decode a VLC into a token */
1224  token = get_vlc2(gb, vlc_table, 11, 3);
1225  /* use the token to get a zero run, a coefficient, and an eob run */
1226  if ((unsigned) token <= 6U) {
1227  eob_run = get_eob_run(gb, token);
1228  if (!eob_run)
1229  eob_run = INT_MAX;
1230 
1231  // record only the number of blocks ended in this plane,
1232  // any spill will be recorded in the next plane.
1233  if (eob_run > num_coeffs - coeff_i) {
1234  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1235  blocks_ended += num_coeffs - coeff_i;
1236  eob_run -= num_coeffs - coeff_i;
1237  coeff_i = num_coeffs;
1238  } else {
1239  dct_tokens[j++] = TOKEN_EOB(eob_run);
1240  blocks_ended += eob_run;
1241  coeff_i += eob_run;
1242  eob_run = 0;
1243  }
1244  } else if (token >= 0) {
1245  zero_run = get_coeff(gb, token, &coeff);
1246 
1247  if (zero_run) {
1248  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
1249  } else {
1250  // Save DC into the fragment structure. DC prediction is
1251  // done in raster order, so the actual DC can't be in with
1252  // other tokens. We still need the token in dct_tokens[]
1253  // however, or else the structure collapses on itself.
1254  if (!coeff_index)
1255  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1256 
1257  dct_tokens[j++] = TOKEN_COEFF(coeff);
1258  }
1259 
1260  if (coeff_index + zero_run > 64) {
1261  av_log(s->avctx, AV_LOG_DEBUG,
1262  "Invalid zero run of %d with %d coeffs left\n",
1263  zero_run, 64 - coeff_index);
1264  zero_run = 64 - coeff_index;
1265  }
1266 
1267  // zero runs code multiple coefficients,
1268  // so don't try to decode coeffs for those higher levels
1269  for (i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1270  s->num_coded_frags[plane][i]--;
1271  coeff_i++;
1272  } else {
1273  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1274  return -1;
1275  }
1276  }
1277 
1278  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1279  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1280 
1281  // decrement the number of blocks that have higher coefficients for each
1282  // EOB run at this level
1283  if (blocks_ended)
1284  for (i = coeff_index + 1; i < 64; i++)
1285  s->num_coded_frags[plane][i] -= blocks_ended;
1286 
1287  // setup the next buffer
1288  if (plane < 2)
1289  s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1290  else if (coeff_index < 63)
1291  s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1292 
1293  return eob_run;
1294 }
1295 
1296 static void reverse_dc_prediction(Vp3DecodeContext *s,
1297  int first_fragment,
1298  int fragment_width,
1299  int fragment_height);
1300 /*
1301  * This function unpacks all of the DCT coefficient data from the
1302  * bitstream.
1303  */
1304 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1305 {
1306  int i;
1307  int dc_y_table;
1308  int dc_c_table;
1309  int ac_y_table;
1310  int ac_c_table;
1311  int residual_eob_run = 0;
1312  VLC *y_tables[64];
1313  VLC *c_tables[64];
1314 
1315  s->dct_tokens[0][0] = s->dct_tokens_base;
1316 
1317  if (get_bits_left(gb) < 16)
1318  return AVERROR_INVALIDDATA;
1319 
1320  /* fetch the DC table indexes */
1321  dc_y_table = get_bits(gb, 4);
1322  dc_c_table = get_bits(gb, 4);
1323 
1324  /* unpack the Y plane DC coefficients */
1325  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_y_table], 0,
1326  0, residual_eob_run);
1327  if (residual_eob_run < 0)
1328  return residual_eob_run;
1329  if (get_bits_left(gb) < 8)
1330  return AVERROR_INVALIDDATA;
1331 
1332  /* reverse prediction of the Y-plane DC coefficients */
1333  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1334 
1335  /* unpack the C plane DC coefficients */
1336  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1337  1, residual_eob_run);
1338  if (residual_eob_run < 0)
1339  return residual_eob_run;
1340  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1341  2, residual_eob_run);
1342  if (residual_eob_run < 0)
1343  return residual_eob_run;
1344 
1345  /* reverse prediction of the C-plane DC coefficients */
1346  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1347  reverse_dc_prediction(s, s->fragment_start[1],
1348  s->fragment_width[1], s->fragment_height[1]);
1349  reverse_dc_prediction(s, s->fragment_start[2],
1350  s->fragment_width[1], s->fragment_height[1]);
1351  }
1352 
1353  if (get_bits_left(gb) < 8)
1354  return AVERROR_INVALIDDATA;
1355  /* fetch the AC table indexes */
1356  ac_y_table = get_bits(gb, 4);
1357  ac_c_table = get_bits(gb, 4);
1358 
1359  /* build tables of AC VLC tables */
1360  for (i = 1; i <= 5; i++) {
1361  /* AC VLC table group 1 */
1362  y_tables[i] = &s->coeff_vlc[ac_y_table + 16];
1363  c_tables[i] = &s->coeff_vlc[ac_c_table + 16];
1364  }
1365  for (i = 6; i <= 14; i++) {
1366  /* AC VLC table group 2 */
1367  y_tables[i] = &s->coeff_vlc[ac_y_table + 32];
1368  c_tables[i] = &s->coeff_vlc[ac_c_table + 32];
1369  }
1370  for (i = 15; i <= 27; i++) {
1371  /* AC VLC table group 3 */
1372  y_tables[i] = &s->coeff_vlc[ac_y_table + 48];
1373  c_tables[i] = &s->coeff_vlc[ac_c_table + 48];
1374  }
1375  for (i = 28; i <= 63; i++) {
1376  /* AC VLC table group 4 */
1377  y_tables[i] = &s->coeff_vlc[ac_y_table + 64];
1378  c_tables[i] = &s->coeff_vlc[ac_c_table + 64];
1379  }
1380 
1381  /* decode all AC coefficients */
1382  for (i = 1; i <= 63; i++) {
1383  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1384  0, residual_eob_run);
1385  if (residual_eob_run < 0)
1386  return residual_eob_run;
1387 
1388  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1389  1, residual_eob_run);
1390  if (residual_eob_run < 0)
1391  return residual_eob_run;
1392  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1393  2, residual_eob_run);
1394  if (residual_eob_run < 0)
1395  return residual_eob_run;
1396  }
1397 
1398  return 0;
1399 }
1400 
1401 #if CONFIG_VP4_DECODER
1402 /**
1403  * eob_tracker[] is used instead of TOKEN_EOB(value);
1404  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant() work.
1405  *
1406  * @return < 0 on error
1407  */
1408 static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1409  VLC *vlc_tables[64],
1410  int plane, int eob_tracker[64], int fragment)
1411 {
1412  int token;
1413  int zero_run = 0;
1414  int16_t coeff = 0;
1415  int coeff_i = 0;
1416  int eob_run;
1417 
1418  while (!eob_tracker[coeff_i]) {
1419  if (get_bits_left(gb) < 1)
1420  return AVERROR_INVALIDDATA;
1421 
1422  token = get_vlc2(gb, vlc_tables[coeff_i]->table, 11, 3);
1423 
1424  /* use the token to get a zero run, a coefficient, and an eob run */
1425  if ((unsigned) token <= 6U) {
1426  eob_run = get_eob_run(gb, token);
1427  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1428  eob_tracker[coeff_i] = eob_run - 1;
1429  return 0;
1430  } else if (token >= 0) {
1431  zero_run = get_coeff(gb, token, &coeff);
1432 
1433  if (zero_run) {
1434  if (coeff_i + zero_run > 64) {
1435  av_log(s->avctx, AV_LOG_DEBUG,
1436  "Invalid zero run of %d with %d coeffs left\n",
1437  zero_run, 64 - coeff_i);
1438  zero_run = 64 - coeff_i;
1439  }
1440  *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
1441  coeff_i += zero_run;
1442  } else {
1443  if (!coeff_i)
1444  s->all_fragments[fragment].dc = coeff;
1445 
1446  *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
1447  }
1448  coeff_i++;
1449  if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
1450  return 0; /* stop */
1451  } else {
1452  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1453  return -1;
1454  }
1455  }
1456  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1457  eob_tracker[coeff_i]--;
1458  return 0;
1459 }
1460 
1461 static void vp4_dc_predictor_reset(VP4Predictor *p)
1462 {
1463  p->dc = 0;
1464  p->type = VP4_DC_UNDEFINED;
1465 }
1466 
1467 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1468 {
1469  int i, j;
1470 
1471  for (i = 0; i < 4; i++)
1472  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1473 
1474  for (j = 1; j < 5; j++)
1475  for (i = 0; i < 4; i++)
1476  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1477 }
1478 
1479 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1480 {
1481  int i;
1482 
1483  for (i = 0; i < 4; i++)
1484  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1485 
1486  for (i = 1; i < 5; i++)
1487  dc_pred[i][0] = dc_pred[i][4];
1488 }
1489 
1490 /* note: dc_pred points to the current block */
1491 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1492 {
1493  int count = 0;
1494  int dc = 0;
1495 
1496  if (dc_pred[-6].type == type) {
1497  dc += dc_pred[-6].dc;
1498  count++;
1499  }
1500 
1501  if (dc_pred[6].type == type) {
1502  dc += dc_pred[6].dc;
1503  count++;
1504  }
1505 
1506  if (count != 2 && dc_pred[-1].type == type) {
1507  dc += dc_pred[-1].dc;
1508  count++;
1509  }
1510 
1511  if (count != 2 && dc_pred[1].type == type) {
1512  dc += dc_pred[1].dc;
1513  count++;
1514  }
1515 
1516  /* using division instead of shift to correctly handle negative values */
1517  return count == 2 ? dc / 2 : last_dc[type];
1518 }
1519 
1520 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1521 {
1522  int plane, i;
1523  int16_t *base = s->dct_tokens_base;
1524  for (plane = 0; plane < 3; plane++) {
1525  for (i = 0; i < 64; i++) {
1526  s->dct_tokens[plane][i] = base;
1527  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1528  }
1529  }
1530 }
1531 
1532 static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1533 {
1534  int i, j;
1535  int dc_y_table;
1536  int dc_c_table;
1537  int ac_y_table;
1538  int ac_c_table;
1539  VLC *tables[2][64];
1540  int plane, sb_y, sb_x;
1541  int eob_tracker[64];
1542  VP4Predictor dc_pred[6][6];
1543  int last_dc[NB_VP4_DC_TYPES];
1544 
1545  if (get_bits_left(gb) < 16)
1546  return AVERROR_INVALIDDATA;
1547 
1548  /* fetch the DC table indexes */
1549  dc_y_table = get_bits(gb, 4);
1550  dc_c_table = get_bits(gb, 4);
1551 
1552  ac_y_table = get_bits(gb, 4);
1553  ac_c_table = get_bits(gb, 4);
1554 
1555  /* build tables of DC/AC VLC tables */
1556 
1557  /* DC table group */
1558  tables[0][0] = &s->coeff_vlc[dc_y_table];
1559  tables[1][0] = &s->coeff_vlc[dc_c_table];
1560  for (i = 1; i <= 5; i++) {
1561  /* AC VLC table group 1 */
1562  tables[0][i] = &s->coeff_vlc[ac_y_table + 16];
1563  tables[1][i] = &s->coeff_vlc[ac_c_table + 16];
1564  }
1565  for (i = 6; i <= 14; i++) {
1566  /* AC VLC table group 2 */
1567  tables[0][i] = &s->coeff_vlc[ac_y_table + 32];
1568  tables[1][i] = &s->coeff_vlc[ac_c_table + 32];
1569  }
1570  for (i = 15; i <= 27; i++) {
1571  /* AC VLC table group 3 */
1572  tables[0][i] = &s->coeff_vlc[ac_y_table + 48];
1573  tables[1][i] = &s->coeff_vlc[ac_c_table + 48];
1574  }
1575  for (i = 28; i <= 63; i++) {
1576  /* AC VLC table group 4 */
1577  tables[0][i] = &s->coeff_vlc[ac_y_table + 64];
1578  tables[1][i] = &s->coeff_vlc[ac_c_table + 64];
1579  }
1580 
1581  vp4_set_tokens_base(s);
1582 
1583  memset(last_dc, 0, sizeof(last_dc));
1584 
1585  for (plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
1586  memset(eob_tracker, 0, sizeof(eob_tracker));
1587 
1588  /* initialise dc prediction */
1589  for (i = 0; i < s->fragment_width[!!plane]; i++)
1590  vp4_dc_predictor_reset(&s->dc_pred_row[i]);
1591 
1592  for (j = 0; j < 6; j++)
1593  for (i = 0; i < 6; i++)
1594  vp4_dc_predictor_reset(&dc_pred[j][i]);
1595 
1596  for (sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
1597  for (sb_x = 0; sb_x *4 < s->fragment_width[!!plane]; sb_x++) {
1598  vp4_dc_pred_before(s, dc_pred, sb_x);
1599  for (j = 0; j < 16; j++) {
1600  int hx = hilbert_offset[j][0];
1601  int hy = hilbert_offset[j][1];
1602  int x = 4 * sb_x + hx;
1603  int y = 4 * sb_y + hy;
1604  VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
1605  int fragment, dc_block_type;
1606 
1607  if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
1608  continue;
1609 
1610  fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;
1611 
1612  if (s->all_fragments[fragment].coding_method == MODE_COPY)
1613  continue;
1614 
1615  if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1616  return -1;
1617 
1618  dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];
1619 
1620  s->all_fragments[fragment].dc +=
1621  vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1622 
1623  this_dc_pred->type = dc_block_type,
1624  this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
1625  }
1626  vp4_dc_pred_after(s, dc_pred, sb_x);
1627  }
1628  }
1629  }
1630 
1631  vp4_set_tokens_base(s);
1632 
1633  return 0;
1634 }
1635 #endif
1636 
1637 /*
1638  * This function reverses the DC prediction for each coded fragment in
1639  * the frame. Much of this function is adapted directly from the original
1640  * VP3 source code.
1641  */
1642 #define COMPATIBLE_FRAME(x) \
1643  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1644 #define DC_COEFF(u) s->all_fragments[u].dc
1645 
1646 static void reverse_dc_prediction(Vp3DecodeContext *s,
1647  int first_fragment,
1648  int fragment_width,
1649  int fragment_height)
1650 {
1651 #define PUL 8
1652 #define PU 4
1653 #define PUR 2
1654 #define PL 1
1655 
1656  int x, y;
1657  int i = first_fragment;
1658 
1659  int predicted_dc;
1660 
1661  /* DC values for the left, up-left, up, and up-right fragments */
1662  int vl, vul, vu, vur;
1663 
1664  /* indexes for the left, up-left, up, and up-right fragments */
1665  int l, ul, u, ur;
1666 
1667  /*
1668  * The 4 fields mean:
1669  * 0: up-left multiplier
1670  * 1: up multiplier
1671  * 2: up-right multiplier
1672  * 3: left multiplier
1673  */
1674  static const int predictor_transform[16][4] = {
1675  { 0, 0, 0, 0 },
1676  { 0, 0, 0, 128 }, // PL
1677  { 0, 0, 128, 0 }, // PUR
1678  { 0, 0, 53, 75 }, // PUR|PL
1679  { 0, 128, 0, 0 }, // PU
1680  { 0, 64, 0, 64 }, // PU |PL
1681  { 0, 128, 0, 0 }, // PU |PUR
1682  { 0, 0, 53, 75 }, // PU |PUR|PL
1683  { 128, 0, 0, 0 }, // PUL
1684  { 0, 0, 0, 128 }, // PUL|PL
1685  { 64, 0, 64, 0 }, // PUL|PUR
1686  { 0, 0, 53, 75 }, // PUL|PUR|PL
1687  { 0, 128, 0, 0 }, // PUL|PU
1688  { -104, 116, 0, 116 }, // PUL|PU |PL
1689  { 24, 80, 24, 0 }, // PUL|PU |PUR
1690  { -104, 116, 0, 116 } // PUL|PU |PUR|PL
1691  };
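 
    /* Worked example (added for clarity): with the left, up and up-left
     * neighbours available, transform = PUL|PU|PL = 13, so with vl = 100,
     * vu = 108, vul = 104 the prediction computed below is
     * (-104*104 + 116*108 + 0*0 + 116*100) / 128 = 13312 / 128 = 104. */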
1692 
1693  /* This table shows which types of blocks can use other blocks for
1694  * prediction. For example, INTRA is the only mode in this table to
1695  * have a frame number of 0. That means INTRA blocks can only predict
1696  * from other INTRA blocks. There are 2 golden frame coding types;
1697  * blocks encoded in these modes can only predict from other blocks
1698  * that were encoded with one of these 2 modes. */
1699  static const unsigned char compatible_frame[9] = {
1700  1, /* MODE_INTER_NO_MV */
1701  0, /* MODE_INTRA */
1702  1, /* MODE_INTER_PLUS_MV */
1703  1, /* MODE_INTER_LAST_MV */
1704  1, /* MODE_INTER_PRIOR_MV */
1705  2, /* MODE_USING_GOLDEN */
1706  2, /* MODE_GOLDEN_MV */
1707  1, /* MODE_INTER_FOUR_MV */
1708  3 /* MODE_COPY */
1709  };
1710  int current_frame_type;
1711 
1712  /* there is a last DC predictor for each of the 3 frame types */
1713  short last_dc[3];
1714 
1715  int transform = 0;
1716 
1717  vul =
1718  vu =
1719  vur =
1720  vl = 0;
1721  last_dc[0] =
1722  last_dc[1] =
1723  last_dc[2] = 0;
1724 
1725  /* for each fragment row... */
1726  for (y = 0; y < fragment_height; y++) {
1727  /* for each fragment in a row... */
1728  for (x = 0; x < fragment_width; x++, i++) {
1729 
1730  /* reverse prediction if this block was coded */
1731  if (s->all_fragments[i].coding_method != MODE_COPY) {
1732  current_frame_type =
1733  compatible_frame[s->all_fragments[i].coding_method];
1734 
1735  transform = 0;
1736  if (x) {
1737  l = i - 1;
1738  vl = DC_COEFF(l);
1739  if (COMPATIBLE_FRAME(l))
1740  transform |= PL;
1741  }
1742  if (y) {
1743  u = i - fragment_width;
1744  vu = DC_COEFF(u);
1745  if (COMPATIBLE_FRAME(u))
1746  transform |= PU;
1747  if (x) {
1748  ul = i - fragment_width - 1;
1749  vul = DC_COEFF(ul);
1750  if (COMPATIBLE_FRAME(ul))
1751  transform |= PUL;
1752  }
1753  if (x + 1 < fragment_width) {
1754  ur = i - fragment_width + 1;
1755  vur = DC_COEFF(ur);
1756  if (COMPATIBLE_FRAME(ur))
1757  transform |= PUR;
1758  }
1759  }
1760 
1761  if (transform == 0) {
1762  /* if there were no fragments to predict from, use last
1763  * DC saved */
1764  predicted_dc = last_dc[current_frame_type];
1765  } else {
1766  /* apply the appropriate predictor transform */
1767  predicted_dc =
1768  (predictor_transform[transform][0] * vul) +
1769  (predictor_transform[transform][1] * vu) +
1770  (predictor_transform[transform][2] * vur) +
1771  (predictor_transform[transform][3] * vl);
1772 
1773  predicted_dc /= 128;
1774 
1775  /* check for outranging on the [ul u l] and
1776  * [ul u ur l] predictors */
1777  if ((transform == 15) || (transform == 13)) {
1778  if (FFABS(predicted_dc - vu) > 128)
1779  predicted_dc = vu;
1780  else if (FFABS(predicted_dc - vl) > 128)
1781  predicted_dc = vl;
1782  else if (FFABS(predicted_dc - vul) > 128)
1783  predicted_dc = vul;
1784  }
1785  }
1786 
1787  /* at long last, apply the predictor */
1788  DC_COEFF(i) += predicted_dc;
1789  /* save the DC */
1790  last_dc[current_frame_type] = DC_COEFF(i);
1791  }
1792  }
1793  }
1794 }
1795 
1796 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1797  int ystart, int yend)
1798 {
1799  int x, y;
1800  int *bounding_values = s->bounding_values_array + 127;
1801 
1802  int width = s->fragment_width[!!plane];
1803  int height = s->fragment_height[!!plane];
1804  int fragment = s->fragment_start[plane] + ystart * width;
1805  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1806  uint8_t *plane_data = s->current_frame.f->data[plane];
1807  if (!s->flipped_image)
1808  stride = -stride;
1809  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1810 
1811  for (y = ystart; y < yend; y++) {
1812  for (x = 0; x < width; x++) {
1813  /* This code basically just deblocks on the edges of coded blocks.
1814  * However, it has to be much more complicated because of the
1815  * brain damaged deblock ordering used in VP3/Theora. Order matters
1816  * because some pixels get filtered twice. */
1817  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1818  /* do not perform left edge filter for left column frags */
1819  if (x > 0) {
1820  s->vp3dsp.h_loop_filter(
1821  plane_data + 8 * x,
1822  stride, bounding_values);
1823  }
1824 
1825  /* do not perform top edge filter for top row fragments */
1826  if (y > 0) {
1827  s->vp3dsp.v_loop_filter(
1828  plane_data + 8 * x,
1829  stride, bounding_values);
1830  }
1831 
1832  /* do not perform right edge filter for right column
1833  * fragments or if right fragment neighbor is also coded
1834  * in this frame (it will be filtered in next iteration) */
1835  if ((x < width - 1) &&
1836  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1837  s->vp3dsp.h_loop_filter(
1838  plane_data + 8 * x + 8,
1839  stride, bounding_values);
1840  }
1841 
1842  /* do not perform bottom edge filter for bottom row
1843  * fragments or if bottom fragment neighbor is also coded
1844  * in this frame (it will be filtered in the next row) */
1845  if ((y < height - 1) &&
1846  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1847  s->vp3dsp.v_loop_filter(
1848  plane_data + 8 * x + 8 * stride,
1849  stride, bounding_values);
1850  }
1851  }
1852 
1853  fragment++;
1854  }
1855  plane_data += 8 * stride;
1856  }
1857 }
1858 
1859 /**
1860  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1861  * for the next block in coding order
1862  */
1863 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
1864  int plane, int inter, int16_t block[64])
1865 {
1866  int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1867  uint8_t *perm = s->idct_scantable;
1868  int i = 0;
1869 
1870  do {
1871  int token = *s->dct_tokens[plane][i];
1872  switch (token & 3) {
1873  case 0: // EOB
1874  if (--token < 4) // 0-3 are token types so the EOB run must now be 0
1875  s->dct_tokens[plane][i]++;
1876  else
1877  *s->dct_tokens[plane][i] = token & ~3;
1878  goto end;
1879  case 1: // zero run
1880  s->dct_tokens[plane][i]++;
1881  i += (token >> 2) & 0x7f;
1882  if (i > 63) {
1883  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1884  return i;
1885  }
1886  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1887  i++;
1888  break;
1889  case 2: // coeff
1890  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1891  s->dct_tokens[plane][i++]++;
1892  break;
1893  default: // shouldn't happen
1894  return i;
1895  }
1896  } while (i < 64);
1897  // return value is expected to be a valid level
1898  i--;
1899 end:
1900  // the actual DC+prediction is in the fragment structure
1901  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1902  return i;
1903 }
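/* The dct_tokens lists consumed above are filled by the unpack stage with the
 * low two bits of each value acting as a tag:
 *   tag 0: end-of-block run, remaining run count stored in bits 2 and up
 *   tag 1: zero run, run length in bits 2-8, signed coefficient in bits 9+
 *   tag 2: single signed coefficient in bits 2 and up
 * Worked example: the value ((5 << 9) | (3 << 2) | 1) means "skip 3 zero
 * coefficients, then store +5"; the zero-run case above reproduces that by
 * advancing i by 3 and writing 5 * dequantizer[perm[i]]. The DC coefficient
 * is never taken from the token stream here; it is re-read from frag->dc,
 * where reverse_dc_prediction() has already applied the DC predictor. */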
1904 
1905 /**
1906  * called when all pixels up to row y are complete
1907  */
1908 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1909 {
1910  int h, cy, i;
1911  int offset[AV_NUM_DATA_POINTERS];
1912 
1913  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1914  int y_flipped = s->flipped_image ? s->height - y : y;
1915 
1916  /* At the end of the frame, report INT_MAX instead of the height of
1917  * the frame. This makes the other threads' ff_thread_await_progress()
1918  * calls cheaper, because they don't have to clip their values. */
1919  ff_thread_report_progress(&s->current_frame,
1920  y_flipped == s->height ? INT_MAX
1921  : y_flipped - 1,
1922  0);
1923  }
1924 
1925  if (!s->avctx->draw_horiz_band)
1926  return;
1927 
1928  h = y - s->last_slice_end;
1929  s->last_slice_end = y;
1930  y -= h;
1931 
1932  if (!s->flipped_image)
1933  y = s->height - y - h;
1934 
1935  cy = y >> s->chroma_y_shift;
1936  offset[0] = s->current_frame.f->linesize[0] * y;
1937  offset[1] = s->current_frame.f->linesize[1] * cy;
1938  offset[2] = s->current_frame.f->linesize[2] * cy;
1939  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1940  offset[i] = 0;
1941 
1942  emms_c();
1943  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1944 }
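/* Two independent consumers are served here: frame-threading peers, which
 * only need the completed-row counter published through
 * ff_thread_report_progress(), and the user's draw_horiz_band() callback,
 * which expects byte offsets into each plane. The chroma offsets are derived
 * from the luma row via chroma_y_shift, e.g. for 4:2:0 a luma row y maps to
 * chroma row y >> 1. */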
1945 
1946 /**
1947  * Wait for the reference frame of the current fragment.
1948  * The progress value is in luma pixel rows.
1949  */
1950 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment,
1951  int motion_y, int y)
1952 {
1953  const ThreadFrame *ref_frame;
1954  int ref_row;
1955  int border = motion_y & 1;
1956 
1957  if (fragment->coding_method == MODE_USING_GOLDEN ||
1958  fragment->coding_method == MODE_GOLDEN_MV)
1959  ref_frame = &s->golden_frame;
1960  else
1961  ref_frame = &s->last_frame;
1962 
1963  ref_row = y + (motion_y >> 1);
1964  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1965 
1966  ff_thread_await_progress(ref_frame, ref_row, 0);
1967 }
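/* The row waited on is the bottom edge of the referenced area: the fragment's
 * own luma row, plus the full-pixel part of the vertical motion vector, plus
 * 8 rows for the block height, plus one extra row when the half-pel bit is
 * set (the interpolation reads one additional line). Taking FFMAX with
 * FFABS(ref_row) keeps the value non-negative for vectors that point above
 * the top of the frame. */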
1968 
1969 #if CONFIG_VP4_DECODER
1970 /**
1971  * @return non-zero if temp (edge_emu_buffer) was populated
1972  */
1973 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1974  uint8_t * motion_source, int stride, int src_x, int src_y, uint8_t *temp)
1975 {
1976  int motion_shift = plane ? 4 : 2;
1977  int subpel_mask = plane ? 3 : 1;
1978  int *bounding_values = s->bounding_values_array + 127;
1979 
1980  int i;
1981  int x, y;
1982  int x2, y2;
1983  int x_subpel, y_subpel;
1984  int x_offset, y_offset;
1985 
1986  int block_width = plane ? 8 : 16;
1987  int plane_width = s->width >> (plane && s->chroma_x_shift);
1988  int plane_height = s->height >> (plane && s->chroma_y_shift);
1989 
1990 #define loop_stride 12
1991  uint8_t loop[12 * loop_stride];
1992 
1993  /* using division instead of shift to correctly handle negative values */
1994  x = 8 * bx + motion_x / motion_shift;
1995  y = 8 * by + motion_y / motion_shift;
1996 
1997  x_subpel = motion_x & subpel_mask;
1998  y_subpel = motion_y & subpel_mask;
1999 
2000  if (x_subpel || y_subpel) {
2001  x--;
2002  y--;
2003 
2004  if (x_subpel)
2005  x = FFMIN(x, x + FFSIGN(motion_x));
2006 
2007  if (y_subpel)
2008  y = FFMIN(y, y + FFSIGN(motion_y));
2009 
2010  x2 = x + block_width;
2011  y2 = y + block_width;
2012 
2013  if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
2014  return 0;
2015 
2016  x_offset = (-(x + 2) & 7) + 2;
2017  y_offset = (-(y + 2) & 7) + 2;
2018 
2019  if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
2020  return 0;
2021 
2022  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2023  loop_stride, stride,
2024  12, 12, src_x - 1, src_y - 1,
2025  plane_width,
2026  plane_height);
2027 
2028  if (x_offset <= 8 + x_subpel)
2029  ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);
2030 
2031  if (y_offset <= 8 + y_subpel)
2032  ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);
2033 
2034  } else {
2035 
2036  x_offset = -x & 7;
2037  y_offset = -y & 7;
2038 
2039  if (!x_offset && !y_offset)
2040  return 0;
2041 
2042  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2043  loop_stride, stride,
2044  12, 12, src_x - 1, src_y - 1,
2045  plane_width,
2046  plane_height);
2047 
2048 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2049  if ((uintptr_t)(ptr) & 7) \
2050  s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2051  else \
2052  s->vp3dsp.name(ptr, stride, bounding_values);
2053 
2054  if (x_offset)
2055  safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2056 
2057  if (y_offset)
2058  safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
2059  }
2060 
2061  for (i = 0; i < 9; i++)
2062  memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);
2063 
2064  return 1;
2065 }
2066 #endif
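/* VP4 applies the loop filter to the *reference* pixels ahead of motion
 * compensation when the predicted area is not aligned to the 8-pixel block
 * grid. The helper above stages a 12x12 window ("loop") around the source
 * area with emulated_edge_mc(), runs the horizontal and/or vertical filter on
 * the crossed boundary inside that buffer, and finally copies a 9x9 region
 * into the caller-supplied edge_emu_buffer so the normal half-pel MC paths in
 * render_slice() can read from it unchanged. */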
2067 
2068 /*
2069  * Perform the final rendering for a particular slice of data.
2070  * The slice number ranges from 0..(c_superblock_height - 1).
2071  */
2072 static void render_slice(Vp3DecodeContext *s, int slice)
2073 {
2074  int x, y, i, j, fragment;
2075  int16_t *block = s->block;
2076  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2077  int motion_halfpel_index;
2078  uint8_t *motion_source;
2079  int plane, first_pixel;
2080 
2081  if (slice >= s->c_superblock_height)
2082  return;
2083 
2084  for (plane = 0; plane < 3; plane++) {
2085  uint8_t *output_plane = s->current_frame.f->data[plane] +
2086  s->data_offset[plane];
2087  uint8_t *last_plane = s->last_frame.f->data[plane] +
2088  s->data_offset[plane];
2089  uint8_t *golden_plane = s->golden_frame.f->data[plane] +
2090  s->data_offset[plane];
2091  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2092  int plane_width = s->width >> (plane && s->chroma_x_shift);
2093  int plane_height = s->height >> (plane && s->chroma_y_shift);
2094  int8_t(*motion_val)[2] = s->motion_val[!!plane];
2095 
2096  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
2097  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2098  int slice_width = plane ? s->c_superblock_width
2099  : s->y_superblock_width;
2100 
2101  int fragment_width = s->fragment_width[!!plane];
2102  int fragment_height = s->fragment_height[!!plane];
2103  int fragment_start = s->fragment_start[plane];
2104 
2105  int do_await = !plane && HAVE_THREADS &&
2106  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2107 
2108  if (!s->flipped_image)
2109  stride = -stride;
2110  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2111  continue;
2112 
2113  /* for each superblock row in the slice (both of them)... */
2114  for (; sb_y < slice_height; sb_y++) {
2115  /* for each superblock in a row... */
2116  for (sb_x = 0; sb_x < slice_width; sb_x++) {
2117  /* for each block in a superblock... */
2118  for (j = 0; j < 16; j++) {
2119  x = 4 * sb_x + hilbert_offset[j][0];
2120  y = 4 * sb_y + hilbert_offset[j][1];
2121  fragment = y * fragment_width + x;
2122 
2123  i = fragment_start + fragment;
2124 
2125  // bounds check
2126  if (x >= fragment_width || y >= fragment_height)
2127  continue;
2128 
2129  first_pixel = 8 * y * stride + 8 * x;
2130 
2131  if (do_await &&
2132  s->all_fragments[i].coding_method != MODE_INTRA)
2133  await_reference_row(s, &s->all_fragments[i],
2134  motion_val[fragment][1],
2135  (16 * y) >> s->chroma_y_shift);
2136 
2137  /* transform if this block was coded */
2138  if (s->all_fragments[i].coding_method != MODE_COPY) {
2139  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2140  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2141  motion_source = golden_plane;
2142  else
2143  motion_source = last_plane;
2144 
2145  motion_source += first_pixel;
2146  motion_halfpel_index = 0;
2147 
2148  /* sort out the motion vector if this fragment is coded
2149  * using a motion vector method */
2150  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2151  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2152  int src_x, src_y;
2153  int standard_mc = 1;
2154  motion_x = motion_val[fragment][0];
2155  motion_y = motion_val[fragment][1];
2156 #if CONFIG_VP4_DECODER
2157  if (plane && s->version >= 2) {
2158  motion_x = (motion_x >> 1) | (motion_x & 1);
2159  motion_y = (motion_y >> 1) | (motion_y & 1);
2160  }
2161 #endif
2162 
2163  src_x = (motion_x >> 1) + 8 * x;
2164  src_y = (motion_y >> 1) + 8 * y;
2165 
2166  motion_halfpel_index = motion_x & 0x01;
2167  motion_source += (motion_x >> 1);
2168 
2169  motion_halfpel_index |= (motion_y & 0x01) << 1;
2170  motion_source += ((motion_y >> 1) * stride);
2171 
2172 #if CONFIG_VP4_DECODER
2173  if (s->version >= 2) {
2174  uint8_t *temp = s->edge_emu_buffer;
2175  if (stride < 0)
2176  temp -= 8 * stride;
2177  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2178  motion_source = temp;
2179  standard_mc = 0;
2180  }
2181  }
2182 #endif
2183 
2184  if (standard_mc && (
2185  src_x < 0 || src_y < 0 ||
2186  src_x + 9 >= plane_width ||
2187  src_y + 9 >= plane_height)) {
2188  uint8_t *temp = s->edge_emu_buffer;
2189  if (stride < 0)
2190  temp -= 8 * stride;
2191 
2192  s->vdsp.emulated_edge_mc(temp, motion_source,
2193  stride, stride,
2194  9, 9, src_x, src_y,
2195  plane_width,
2196  plane_height);
2197  motion_source = temp;
2198  }
2199  }
2200 
2201  /* first, take care of copying a block from either the
2202  * previous or the golden frame */
2203  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2204  /* Note, it is possible to implement all MC cases
2205  * with put_no_rnd_pixels_l2 which would look more
2206  * like the VP3 source but this would be slower as
2207  * put_no_rnd_pixels_tab is better optimized */
2208  if (motion_halfpel_index != 3) {
2209  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2210  output_plane + first_pixel,
2211  motion_source, stride, 8);
2212  } else {
2213  /* d is 0 if motion_x and _y have the same sign,
2214  * else -1 */
2215  int d = (motion_x ^ motion_y) >> 31;
2216  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2217  motion_source - d,
2218  motion_source + stride + 1 + d,
2219  stride, 8);
2220  }
2221  }
2222 
2223  /* invert DCT and place (or add) in final output */
2224 
2225  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2226  vp3_dequant(s, s->all_fragments + i,
2227  plane, 0, block);
2228  s->vp3dsp.idct_put(output_plane + first_pixel,
2229  stride,
2230  block);
2231  } else {
2232  if (vp3_dequant(s, s->all_fragments + i,
2233  plane, 1, block)) {
2234  s->vp3dsp.idct_add(output_plane + first_pixel,
2235  stride,
2236  block);
2237  } else {
2238  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2239  stride, block);
2240  }
2241  }
2242  } else {
2243  /* copy directly from the previous frame */
2244  s->hdsp.put_pixels_tab[1][0](
2245  output_plane + first_pixel,
2246  last_plane + first_pixel,
2247  stride, 8);
2248  }
2249  }
2250  }
2251 
2252  // Filter up to the last row in the superblock row
2253  if (s->version < 2 && !s->skip_loop_filter)
2254  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2255  FFMIN(4 * sb_y + 3, fragment_height - 1));
2256  }
2257  }
2258 
2259  /* this looks like a good place for slice dispatch... */
2260  /* algorithm:
2261  * if (slice == s->macroblock_height - 1)
2262  * dispatch (both last slice & 2nd-to-last slice);
2263  * else if (slice > 0)
2264  * dispatch (slice - 1);
2265  */
2266 
2267  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2268  s->height - 16));
2269 }
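/* motion_halfpel_index packs the two half-pel flags: bit 0 is the horizontal
 * half-pel bit and bit 1 the vertical one, giving indices 0-3 into
 * put_no_rnd_pixels_tab. Index 3 (half-pel in both directions) has no direct
 * table entry, so put_no_rnd_pixels_l2() averages two diagonally offset
 * reads; d = (motion_x ^ motion_y) >> 31 selects which diagonal pair,
 * depending on whether the two components share a sign. For VP4
 * (version >= 2) the chroma vectors are first folded with
 * (v >> 1) | (v & 1), which roughly halves the vector while keeping the
 * half-pel bit set if it was set. Each slice spans one chroma superblock row,
 * i.e. two luma superblock rows for 4:2:0 content. */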
2270 
2271 /// Allocate tables for per-frame data in Vp3DecodeContext
2272 static av_cold int allocate_tables(AVCodecContext *avctx)
2273 {
2274  Vp3DecodeContext *s = avctx->priv_data;
2275  int y_fragment_count, c_fragment_count;
2276 
2277  free_tables(avctx);
2278 
2279  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2280  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2281 
2282  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2283  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2284  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2285 
2286  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2287  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2288  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2289 
2290  s->dct_tokens_base = av_calloc(s->fragment_count,
2291  64 * sizeof(*s->dct_tokens_base));
2292  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2293  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2294 
2295  /* work out the block mapping tables */
2296  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2297  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2298 
2299  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2300 
2301  if (!s->superblock_coding || !s->all_fragments ||
2302  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2303  !s->nkf_coded_fragment_list ||
2304  !s->superblock_fragments || !s->macroblock_coding ||
2305  !s->dc_pred_row ||
2306  !s->motion_val[0] || !s->motion_val[1]) {
2307  return -1;
2308  }
2309 
2310  init_block_mapping(s);
2311 
2312  return 0;
2313 }
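/* All sizes above are derived from the fragment and superblock counts set up
 * in the init function below. superblock_coding doubles as the macroblock
 * coding-status array for VP4, hence the
 * FFMAX(superblock_count, yuv_macroblock_count) allocation, and
 * dct_tokens_base reserves 64 token slots per fragment, one per coefficient
 * index. On allocation failure -1 is returned and the close callback (run via
 * FF_CODEC_CAP_INIT_CLEANUP) releases whatever was already allocated. */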
2314 
2315 static av_cold int init_frames(Vp3DecodeContext *s)
2316 {
2317  s->current_frame.f = av_frame_alloc();
2318  s->last_frame.f = av_frame_alloc();
2319  s->golden_frame.f = av_frame_alloc();
2320 
2321  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2322  return AVERROR(ENOMEM);
2323 
2324  return 0;
2325 }
2326 
2327 static av_cold int vp3_decode_init(AVCodecContext *avctx)
2328 {
2329  Vp3DecodeContext *s = avctx->priv_data;
2330  int i, inter, plane, ret;
2331  int c_width;
2332  int c_height;
2333  int y_fragment_count, c_fragment_count;
2334 #if CONFIG_VP4_DECODER
2335  int j;
2336 #endif
2337 
2338  ret = init_frames(s);
2339  if (ret < 0)
2340  return ret;
2341 
2342  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2343  s->version = 3;
2344 #if !CONFIG_VP4_DECODER
2345  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2346  return AVERROR_DECODER_NOT_FOUND;
2347 #endif
2348  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2349  s->version = 0;
2350  else
2351  s->version = 1;
2352 
2353  s->avctx = avctx;
2354  s->width = FFALIGN(avctx->coded_width, 16);
2355  s->height = FFALIGN(avctx->coded_height, 16);
2356  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2357  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2358  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
2359  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2360  ff_videodsp_init(&s->vdsp, 8);
2361  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2362 
2363  for (i = 0; i < 64; i++) {
2364 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2365  s->idct_permutation[i] = TRANSPOSE(i);
2366  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2367 #undef TRANSPOSE
2368  }
2369 
2370  /* initialize to an impossible value which will force a recalculation
2371  * in the first frame decode */
2372  for (i = 0; i < 3; i++)
2373  s->qps[i] = -1;
2374 
2375  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2376  if (ret)
2377  return ret;
2378 
2379  s->y_superblock_width = (s->width + 31) / 32;
2380  s->y_superblock_height = (s->height + 31) / 32;
2381  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2382 
2383  /* work out the dimensions for the C planes */
2384  c_width = s->width >> s->chroma_x_shift;
2385  c_height = s->height >> s->chroma_y_shift;
2386  s->c_superblock_width = (c_width + 31) / 32;
2387  s->c_superblock_height = (c_height + 31) / 32;
2388  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2389 
2390  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2391  s->u_superblock_start = s->y_superblock_count;
2392  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2393 
2394  s->macroblock_width = (s->width + 15) / 16;
2395  s->macroblock_height = (s->height + 15) / 16;
2396  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2397  s->c_macroblock_width = (c_width + 15) / 16;
2398  s->c_macroblock_height = (c_height + 15) / 16;
2399  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2400  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2401 
2402  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2403  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2404  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2405  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2406 
2407  /* fragment count covers all 8x8 blocks for all 3 planes */
2408  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2409  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2410  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2411  s->fragment_start[1] = y_fragment_count;
2412  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2413 
2414  if (!s->theora_tables) {
2415  const uint8_t (*bias_tabs)[32][2];
2416 
2417  for (i = 0; i < 64; i++) {
2418  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2419  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2420  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2421  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2422  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2423  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2424  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2425  }
2426 
2427  for (inter = 0; inter < 2; inter++) {
2428  for (plane = 0; plane < 3; plane++) {
2429  s->qr_count[inter][plane] = 1;
2430  s->qr_size[inter][plane][0] = 63;
2431  s->qr_base[inter][plane][0] =
2432  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2433  }
2434  }
2435 
2436  /* init VLC tables */
2437  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2438  for (int i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2439  ret = ff_init_vlc_from_lengths(&s->coeff_vlc[i], 11, 32,
2440  &bias_tabs[i][0][1], 2,
2441  &bias_tabs[i][0][0], 2, 1,
2442  0, 0, avctx);
2443  if (ret < 0)
2444  return ret;
2445  }
2446  } else {
2447  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2448  const HuffTable *tab = &s->huffman_table[i];
2449 
2450  ret = ff_init_vlc_from_lengths(&s->coeff_vlc[i], 11, tab->nb_entries,
2451  &tab->entries[0].len, sizeof(*tab->entries),
2452  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2453  0, 0, avctx);
2454  if (ret < 0)
2455  return ret;
2456  }
2457  }
2458 
2459  ret = ff_init_vlc_from_lengths(&s->superblock_run_length_vlc, SUPERBLOCK_VLC_BITS, 34,
2460  superblock_run_length_vlc_lens, 1,
2461  NULL, 0, 0, 1, 0, avctx);
2462  if (ret < 0)
2463  return ret;
2464 
2465  ret = ff_init_vlc_from_lengths(&s->fragment_run_length_vlc, 5, 30,
2466  fragment_run_length_vlc_len, 1,
2467  NULL, 0, 0, 0, 0, avctx);
2468  if (ret < 0)
2469  return ret;
2470 
2471  ret = ff_init_vlc_from_lengths(&s->mode_code_vlc, 3, 8,
2472  mode_code_vlc_len, 1,
2473  NULL, 0, 0, 0, 0, avctx);
2474  if (ret < 0)
2475  return ret;
2476 
2477  ret = ff_init_vlc_from_lengths(&s->motion_vector_vlc, VP3_MV_VLC_BITS, 63,
2478  &motion_vector_vlc_table[0][1], 2,
2479  &motion_vector_vlc_table[0][0], 2, 1,
2480  -31, 0, avctx);
2481  if (ret < 0)
2482  return ret;
2483 
2484 #if CONFIG_VP4_DECODER
2485  for (j = 0; j < 2; j++)
2486  for (i = 0; i < 7; i++) {
2487  ret = ff_init_vlc_from_lengths(&s->vp4_mv_vlc[j][i], VP4_MV_VLC_BITS, 63,
2488  &vp4_mv_vlc[j][i][0][1], 2,
2489  &vp4_mv_vlc[j][i][0][0], 2, 1, -31,
2490  0, avctx);
2491  if (ret < 0)
2492  return ret;
2493  }
2494 
2495  /* version >= 2 */
2496  for (i = 0; i < 2; i++)
2497  if ((ret = init_vlc(&s->block_pattern_vlc[i], 3, 14,
2498  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2499  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0)) < 0)
2500  return ret;
2501 #endif
2502 
2503  return allocate_tables(avctx);
2504 }
2505 
2506 /// Release and shuffle frames after decode finishes
2507 static int update_frames(AVCodecContext *avctx)
2508 {
2509  Vp3DecodeContext *s = avctx->priv_data;
2510  int ret = 0;
2511 
2512  /* shuffle frames (last = current) */
2513  ff_thread_release_ext_buffer(avctx, &s->last_frame);
2514  ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
2515  if (ret < 0)
2516  goto fail;
2517 
2518  if (s->keyframe) {
2519  ff_thread_release_ext_buffer(avctx, &s->golden_frame);
2520  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
2521  }
2522 
2523 fail:
2524  ff_thread_release_ext_buffer(avctx, &s->current_frame);
2525  return ret;
2526 }
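/* Reference handling in one place: after every frame the just-decoded picture
 * becomes the "last" frame, and on keyframes it also becomes the "golden"
 * frame that MODE_USING_GOLDEN / MODE_GOLDEN_MV fragments predict from. Only
 * frame references are shuffled via ff_thread_ref_frame(); no pixel data is
 * copied. */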
2527 
2528 #if HAVE_THREADS
2529 static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
2530 {
2531  ff_thread_release_ext_buffer(s->avctx, dst);
2532  if (src->f->data[0])
2533  return ff_thread_ref_frame(dst, src);
2534  return 0;
2535 }
2536 
2537 static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
2538 {
2539  int ret;
2540  if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
2541  (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
2542  (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
2543  return ret;
2544  return 0;
2545 }
2546 
2547 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2548 {
2549  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
2550  int qps_changed = 0, i, err;
2551 
2552  if (!s1->current_frame.f->data[0] ||
2553  s->width != s1->width || s->height != s1->height) {
2554  if (s != s1)
2555  ref_frames(s, s1);
2556  return -1;
2557  }
2558 
2559  if (s != s1) {
2560  // copy previous frame data
2561  if ((err = ref_frames(s, s1)) < 0)
2562  return err;
2563 
2564  s->keyframe = s1->keyframe;
2565 
2566  // copy qscale data if necessary
2567  for (i = 0; i < 3; i++) {
2568  if (s->qps[i] != s1->qps[1]) {
2569  qps_changed = 1;
2570  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2571  }
2572  }
2573 
2574  if (s->qps[0] != s1->qps[0])
2575  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2576  sizeof(s->bounding_values_array));
2577 
2578  if (qps_changed) {
2579  memcpy(s->qps, s1->qps, sizeof(s->qps));
2580  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2581  s->nqps = s1->nqps;
2582  }
2583  }
2584 
2585  return update_frames(dst);
2586 }
2587 #endif
2588 
2589 static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2590  int *got_frame, AVPacket *avpkt)
2591 {
2592  const uint8_t *buf = avpkt->data;
2593  int buf_size = avpkt->size;
2594  Vp3DecodeContext *s = avctx->priv_data;
2595  GetBitContext gb;
2596  int i, ret;
2597 
2598  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2599  return ret;
2600 
2601 #if CONFIG_THEORA_DECODER
2602  if (s->theora && get_bits1(&gb)) {
2603  int type = get_bits(&gb, 7);
2604  skip_bits_long(&gb, 6*8); /* "theora" */
2605 
2606  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2607  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2608  return AVERROR_PATCHWELCOME;
2609  }
2610  if (type == 0) {
2611  vp3_decode_end(avctx);
2612  ret = theora_decode_header(avctx, &gb);
2613 
2614  if (ret >= 0)
2615  ret = vp3_decode_init(avctx);
2616  if (ret < 0) {
2617  vp3_decode_end(avctx);
2618  return ret;
2619  }
2620  return buf_size;
2621  } else if (type == 2) {
2622  vp3_decode_end(avctx);
2623  ret = theora_decode_tables(avctx, &gb);
2624  if (ret >= 0)
2625  ret = vp3_decode_init(avctx);
2626  if (ret < 0) {
2627  vp3_decode_end(avctx);
2628  return ret;
2629  }
2630  return buf_size;
2631  }
2632 
2633  av_log(avctx, AV_LOG_ERROR,
2634  "Header packet passed to frame decoder, skipping\n");
2635  return -1;
2636  }
2637 #endif
2638 
2639  s->keyframe = !get_bits1(&gb);
2640  if (!s->all_fragments) {
2641  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2642  return -1;
2643  }
2644  if (!s->theora)
2645  skip_bits(&gb, 1);
2646  for (i = 0; i < 3; i++)
2647  s->last_qps[i] = s->qps[i];
2648 
2649  s->nqps = 0;
2650  do {
2651  s->qps[s->nqps++] = get_bits(&gb, 6);
2652  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2653  for (i = s->nqps; i < 3; i++)
2654  s->qps[i] = -1;
2655 
2656  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2657  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2658  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2659 
2660  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2661  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2662  : AVDISCARD_NONKEY);
2663 
2664  if (s->qps[0] != s->last_qps[0])
2665  init_loop_filter(s);
2666 
2667  for (i = 0; i < s->nqps; i++)
2668  // reinit all dequantizers if the first one changed, because
2669  // the DC of the first quantizer must be used for all matrices
2670  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2671  init_dequantizer(s, i);
2672 
2673  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2674  return buf_size;
2675 
2676  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2677  : AV_PICTURE_TYPE_P;
2678  s->current_frame.f->key_frame = s->keyframe;
2679  if ((ret = ff_thread_get_ext_buffer(avctx, &s->current_frame,
2680  AV_GET_BUFFER_FLAG_REF)) < 0)
2681  goto error;
2682 
2683  if (!s->edge_emu_buffer) {
2684  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2685  if (!s->edge_emu_buffer) {
2686  ret = AVERROR(ENOMEM);
2687  goto error;
2688  }
2689  }
2690 
2691  if (s->keyframe) {
2692  if (!s->theora) {
2693  skip_bits(&gb, 4); /* width code */
2694  skip_bits(&gb, 4); /* height code */
2695  if (s->version) {
2696  int version = get_bits(&gb, 5);
2697 #if !CONFIG_VP4_DECODER
2698  if (version >= 2) {
2699  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2700  return AVERROR_DECODER_NOT_FOUND;
2701  }
2702 #endif
2703  s->version = version;
2704  if (avctx->frame_num == 0)
2705  av_log(s->avctx, AV_LOG_DEBUG,
2706  "VP version: %d\n", s->version);
2707  }
2708  }
2709  if (s->version || s->theora) {
2710  if (get_bits1(&gb))
2711  av_log(s->avctx, AV_LOG_ERROR,
2712  "Warning, unsupported keyframe coding type?!\n");
2713  skip_bits(&gb, 2); /* reserved? */
2714 
2715 #if CONFIG_VP4_DECODER
2716  if (s->version >= 2) {
2717  int mb_height, mb_width;
2718  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2719 
2720  mb_height = get_bits(&gb, 8);
2721  mb_width = get_bits(&gb, 8);
2722  if (mb_height != s->macroblock_height ||
2723  mb_width != s->macroblock_width)
2724  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2725 
2726  mb_width_mul = get_bits(&gb, 5);
2727  mb_width_div = get_bits(&gb, 3);
2728  mb_height_mul = get_bits(&gb, 5);
2729  mb_height_div = get_bits(&gb, 3);
2730  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2731  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multipler/divider");
2732 
2733  if (get_bits(&gb, 2))
2734  avpriv_request_sample(s->avctx, "unknown bits");
2735  }
2736 #endif
2737  }
2738  } else {
2739  if (!s->golden_frame.f->data[0]) {
2740  av_log(s->avctx, AV_LOG_WARNING,
2741  "vp3: first frame not a keyframe\n");
2742 
2743  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2744  if ((ret = ff_thread_get_ext_buffer(avctx, &s->golden_frame,
2745  AV_GET_BUFFER_FLAG_REF)) < 0)
2746  goto error;
2747  ff_thread_release_ext_buffer(avctx, &s->last_frame);
2748  if ((ret = ff_thread_ref_frame(&s->last_frame,
2749  &s->golden_frame)) < 0)
2750  goto error;
2751  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2752  }
2753  }
2754 
2755  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2756  ff_thread_finish_setup(avctx);
2757 
2758  if (s->version < 2) {
2759  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2760  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2761  goto error;
2762  }
2763 #if CONFIG_VP4_DECODER
2764  } else {
2765  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2766  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2767  goto error;
2768  }
2769 #endif
2770  }
2771  if ((ret = unpack_modes(s, &gb)) < 0) {
2772  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2773  goto error;
2774  }
2775  if (ret = unpack_vectors(s, &gb)) {
2776  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2777  goto error;
2778  }
2779  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2780  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2781  goto error;
2782  }
2783 
2784  if (s->version < 2) {
2785  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2786  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2787  goto error;
2788  }
2789 #if CONFIG_VP4_DECODER
2790  } else {
2791  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2792  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2793  goto error;
2794  }
2795 #endif
2796  }
2797 
2798  for (i = 0; i < 3; i++) {
2799  int height = s->height >> (i && s->chroma_y_shift);
2800  if (s->flipped_image)
2801  s->data_offset[i] = 0;
2802  else
2803  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2804  }
2805 
2806  s->last_slice_end = 0;
2807  for (i = 0; i < s->c_superblock_height; i++)
2808  render_slice(s, i);
2809 
2810  // filter the last row
2811  if (s->version < 2)
2812  for (i = 0; i < 3; i++) {
2813  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2814  apply_loop_filter(s, i, row, row + 1);
2815  }
2816  vp3_draw_horiz_band(s, s->height);
2817 
2818  /* output frame, offset as needed */
2819  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2820  return ret;
2821 
2822  frame->crop_left = s->offset_x;
2823  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2824  frame->crop_top = s->offset_y;
2825  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2826 
2827  *got_frame = 1;
2828 
2829  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
2830  ret = update_frames(avctx);
2831  if (ret < 0)
2832  return ret;
2833  }
2834 
2835  return buf_size;
2836 
2837 error:
2838  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2839 
2840  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2841  av_frame_unref(s->current_frame.f);
2842 
2843  return ret;
2844 }
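/* Frame packet layout handled above, in bitstream order: a guard for stray
 * Theora header packets, the keyframe flag, up to three 6-bit quality indices
 * (extra ones only for Theora >= 3.2, each flagged by a continuation bit),
 * the keyframe-only version/size fields (plus macroblock dimensions for VP4),
 * and then the per-frame partitions: superblock/macroblock coding, modes,
 * motion vectors, block qps and DCT coefficients. Dequantizers are rebuilt
 * only for the qps that changed, except that a change of qps[0] forces all of
 * them to be rebuilt because every matrix reuses the first quantizer's DC
 * entry. */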
2845 
2846 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2847  AVCodecContext *avctx)
2848 {
2849  if (get_bits1(gb)) {
2850  int token;
2851  if (huff->nb_entries >= 32) { /* overflow */
2852  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2853  return -1;
2854  }
2855  token = get_bits(gb, 5);
2856  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2857  length, huff->nb_entries, token);
2858  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2859  } else {
2860  /* The following bound follows from the fact that nb_entries <= 32. */
2861  if (length >= 31) { /* overflow */
2862  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2863  return -1;
2864  }
2865  length++;
2866  if (read_huffman_tree(huff, gb, length, avctx))
2867  return -1;
2868  if (read_huffman_tree(huff, gb, length, avctx))
2869  return -1;
2870  }
2871  return 0;
2872 }
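/* The Huffman tree is transmitted as a pre-order walk: a 1 bit emits a leaf
 * carrying a 5-bit token at the current code length, a 0 bit descends into
 * the two subtrees with the length increased by one. For example, the bit
 * sequence 0 1 ttttt 1 ttttt describes a two-leaf tree whose tokens both get
 * 1-bit codes. The 32-entry and 31-length caps bound any well-formed table,
 * so exceeding them is treated as a corrupt header. */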
2873 
2874 #if CONFIG_THEORA_DECODER
2875 static const enum AVPixelFormat theora_pix_fmts[4] = {
2876  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2877 };
2878 
2879 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2880 {
2881  Vp3DecodeContext *s = avctx->priv_data;
2882  int visible_width, visible_height, colorspace;
2883  uint8_t offset_x = 0, offset_y = 0;
2884  int ret;
2885  AVRational fps, aspect;
2886 
2887  if (get_bits_left(gb) < 206)
2888  return AVERROR_INVALIDDATA;
2889 
2890  s->theora_header = 0;
2891  s->theora = get_bits(gb, 24);
2892  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2893  if (!s->theora) {
2894  s->theora = 1;
2895  avpriv_request_sample(s->avctx, "theora 0");
2896  }
2897 
2898  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2899  * but previous versions have the image flipped relative to vp3 */
2900  if (s->theora < 0x030200) {
2901  s->flipped_image = 1;
2902  av_log(avctx, AV_LOG_DEBUG,
2903  "Old (<alpha3) Theora bitstream, flipped image\n");
2904  }
2905 
2906  visible_width =
2907  s->width = get_bits(gb, 16) << 4;
2908  visible_height =
2909  s->height = get_bits(gb, 16) << 4;
2910 
2911  if (s->theora >= 0x030200) {
2912  visible_width = get_bits(gb, 24);
2913  visible_height = get_bits(gb, 24);
2914 
2915  offset_x = get_bits(gb, 8); /* offset x */
2916  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2917  }
2918 
2919  /* sanity check */
2920  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2921  visible_width + offset_x > s->width ||
2922  visible_height + offset_y > s->height) {
2923  av_log(avctx, AV_LOG_ERROR,
2924  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2925  visible_width, visible_height, offset_x, offset_y,
2926  s->width, s->height);
2927  return AVERROR_INVALIDDATA;
2928  }
2929 
2930  fps.num = get_bits_long(gb, 32);
2931  fps.den = get_bits_long(gb, 32);
2932  if (fps.num && fps.den) {
2933  if (fps.num < 0 || fps.den < 0) {
2934  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2935  return AVERROR_INVALIDDATA;
2936  }
2937  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2938  fps.den, fps.num, 1 << 30);
2939  }
2940 
2941  aspect.num = get_bits(gb, 24);
2942  aspect.den = get_bits(gb, 24);
2943  if (aspect.num && aspect.den) {
2944  av_reduce(&avctx->sample_aspect_ratio.num,
2945  &avctx->sample_aspect_ratio.den,
2946  aspect.num, aspect.den, 1 << 30);
2947  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2948  }
2949 
2950  if (s->theora < 0x030200)
2951  skip_bits(gb, 5); /* keyframe frequency force */
2952  colorspace = get_bits(gb, 8);
2953  skip_bits(gb, 24); /* bitrate */
2954 
2955  skip_bits(gb, 6); /* quality hint */
2956 
2957  if (s->theora >= 0x030200) {
2958  skip_bits(gb, 5); /* keyframe frequency force */
2959  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2960  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2961  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2962  return AVERROR_INVALIDDATA;
2963  }
2964  skip_bits(gb, 3); /* reserved */
2965  } else
2966  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2967 
2968  ret = ff_set_dimensions(avctx, s->width, s->height);
2969  if (ret < 0)
2970  return ret;
2971  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2972  avctx->width = visible_width;
2973  avctx->height = visible_height;
2974  // translate offsets from theora axis ([0,0] lower left)
2975  // to normal axis ([0,0] upper left)
2976  s->offset_x = offset_x;
2977  s->offset_y = s->height - visible_height - offset_y;
2978  }
2979 
2980  if (colorspace == 1)
2981  avctx->color_primaries = AVCOL_PRI_BT470M;
2982  else if (colorspace == 2)
2983  avctx->color_primaries = AVCOL_PRI_BT470BG;
2984 
2985  if (colorspace == 1 || colorspace == 2) {
2986  avctx->colorspace = AVCOL_SPC_BT470BG;
2987  avctx->color_trc = AVCOL_TRC_BT709;
2988  }
2989 
2990  s->theora_header = 1;
2991  return 0;
2992 }
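/* Theora stores the visible frame as a crop of a 16-pixel-aligned coded
 * frame, with the offset measured from the bottom-left corner. The decoder
 * keeps the coded size in s->width/s->height, converts the vertical offset to
 * a top-left origin (s->offset_y = coded_height - visible_height - offset_y)
 * and later exports the crop through frame->crop_* in vp3_decode_frame().
 * Colorspace values 1 and 2 select the Rec.470 systems; both are tagged with
 * BT470BG matrix coefficients and a BT.709 transfer. */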
2993 
2994 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2995 {
2996  Vp3DecodeContext *s = avctx->priv_data;
2997  int i, n, matrices, inter, plane, ret;
2998 
2999  if (!s->theora_header)
3000  return AVERROR_INVALIDDATA;
3001 
3002  if (s->theora >= 0x030200) {
3003  n = get_bits(gb, 3);
3004  /* loop filter limit values table */
3005  if (n)
3006  for (i = 0; i < 64; i++)
3007  s->filter_limit_values[i] = get_bits(gb, n);
3008  }
3009 
3010  if (s->theora >= 0x030200)
3011  n = get_bits(gb, 4) + 1;
3012  else
3013  n = 16;
3014  /* quality threshold table */
3015  for (i = 0; i < 64; i++)
3016  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3017 
3018  if (s->theora >= 0x030200)
3019  n = get_bits(gb, 4) + 1;
3020  else
3021  n = 16;
3022  /* dc scale factor table */
3023  for (i = 0; i < 64; i++)
3024  s->coded_dc_scale_factor[0][i] =
3025  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3026 
3027  if (s->theora >= 0x030200)
3028  matrices = get_bits(gb, 9) + 1;
3029  else
3030  matrices = 3;
3031 
3032  if (matrices > 384) {
3033  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
3034  return -1;
3035  }
3036 
3037  for (n = 0; n < matrices; n++)
3038  for (i = 0; i < 64; i++)
3039  s->base_matrix[n][i] = get_bits(gb, 8);
3040 
3041  for (inter = 0; inter <= 1; inter++) {
3042  for (plane = 0; plane <= 2; plane++) {
3043  int newqr = 1;
3044  if (inter || plane > 0)
3045  newqr = get_bits1(gb);
3046  if (!newqr) {
3047  int qtj, plj;
3048  if (inter && get_bits1(gb)) {
3049  qtj = 0;
3050  plj = plane;
3051  } else {
3052  qtj = (3 * inter + plane - 1) / 3;
3053  plj = (plane + 2) % 3;
3054  }
3055  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3056  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3057  sizeof(s->qr_size[0][0]));
3058  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3059  sizeof(s->qr_base[0][0]));
3060  } else {
3061  int qri = 0;
3062  int qi = 0;
3063 
3064  for (;;) {
3065  i = get_bits(gb, av_log2(matrices - 1) + 1);
3066  if (i >= matrices) {
3067  av_log(avctx, AV_LOG_ERROR,
3068  "invalid base matrix index\n");
3069  return -1;
3070  }
3071  s->qr_base[inter][plane][qri] = i;
3072  if (qi >= 63)
3073  break;
3074  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3075  s->qr_size[inter][plane][qri++] = i;
3076  qi += i;
3077  }
3078 
3079  if (qi > 63) {
3080  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3081  return -1;
3082  }
3083  s->qr_count[inter][plane] = qri;
3084  }
3085  }
3086  }
3087 
3088  /* Huffman tables */
3089  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3090  s->huffman_table[i].nb_entries = 0;
3091  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3092  return ret;
3093  }
3094 
3095  s->theora_tables = 1;
3096 
3097  return 0;
3098 }
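/* The quant-range ("qr") data read above describes, for each (inter, plane)
 * pair, a piecewise assignment of base matrices to the 64 quality indices:
 * the pair either inherits a previously transmitted set (newqr == 0) or reads
 * an alternating list of base-matrix indices and range sizes whose sizes must
 * sum to exactly 63, covering qi 0..63. init_dequantizer() later blends the
 * base matrices at the two ends of the range containing the active qi. */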
3099 
3100 static av_cold int theora_decode_init(AVCodecContext *avctx)
3101 {
3102  Vp3DecodeContext *s = avctx->priv_data;
3103  GetBitContext gb;
3104  int ptype;
3105  const uint8_t *header_start[3];
3106  int header_len[3];
3107  int i;
3108  int ret;
3109 
3110  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3111 
3112  s->theora = 1;
3113 
3114  if (!avctx->extradata_size) {
3115  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3116  return -1;
3117  }
3118 
3119  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
3120  42, header_start, header_len) < 0) {
3121  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3122  return -1;
3123  }
3124 
3125  for (i = 0; i < 3; i++) {
3126  if (header_len[i] <= 0)
3127  continue;
3128  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3129  if (ret < 0)
3130  return ret;
3131 
3132  ptype = get_bits(&gb, 8);
3133 
3134  if (!(ptype & 0x80)) {
3135  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3136 // return -1;
3137  }
3138 
3139  // FIXME: Check for this as well.
3140  skip_bits_long(&gb, 6 * 8); /* "theora" */
3141 
3142  switch (ptype) {
3143  case 0x80:
3144  if (theora_decode_header(avctx, &gb) < 0)
3145  return -1;
3146  break;
3147  case 0x81:
3148 // FIXME: is this needed? it breaks sometimes
3149 // theora_decode_comments(avctx, gb);
3150  break;
3151  case 0x82:
3152  if (theora_decode_tables(avctx, &gb))
3153  return -1;
3154  break;
3155  default:
3156  av_log(avctx, AV_LOG_ERROR,
3157  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3158  break;
3159  }
3160  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3161  av_log(avctx, AV_LOG_WARNING,
3162  "%d bits left in packet %X\n",
3163  get_bits_left(&gb), ptype);
3164  if (s->theora < 0x030200)
3165  break;
3166  }
3167 
3168  return vp3_decode_init(avctx);
3169 }
3170 
3171 const FFCodec ff_theora_decoder = {
3172  .p.name = "theora",
3173  CODEC_LONG_NAME("Theora"),
3174  .p.type = AVMEDIA_TYPE_VIDEO,
3175  .p.id = AV_CODEC_ID_THEORA,
3176  .priv_data_size = sizeof(Vp3DecodeContext),
3177  .init = theora_decode_init,
3178  .close = vp3_decode_end,
3179  FF_CODEC_DECODE_CB(vp3_decode_frame),
3180  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3181  AV_CODEC_CAP_FRAME_THREADS,
3182  .flush = vp3_decode_flush,
3183  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3184  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3185  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3186 };
3187 #endif
3188 
3189 const FFCodec ff_vp3_decoder = {
3190  .p.name = "vp3",
3191  CODEC_LONG_NAME("On2 VP3"),
3192  .p.type = AVMEDIA_TYPE_VIDEO,
3193  .p.id = AV_CODEC_ID_VP3,
3194  .priv_data_size = sizeof(Vp3DecodeContext),
3195  .init = vp3_decode_init,
3196  .close = vp3_decode_end,
3197  FF_CODEC_DECODE_CB(vp3_decode_frame),
3198  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3199  AV_CODEC_CAP_FRAME_THREADS,
3200  .flush = vp3_decode_flush,
3201  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3202  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3203  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3204 };
3205 
3206 #if CONFIG_VP4_DECODER
3207 const FFCodec ff_vp4_decoder = {
3208  .p.name = "vp4",
3209  CODEC_LONG_NAME("On2 VP4"),
3210  .p.type = AVMEDIA_TYPE_VIDEO,
3211  .p.id = AV_CODEC_ID_VP4,
3212  .priv_data_size = sizeof(Vp3DecodeContext),
3213  .init = vp3_decode_init,
3214  .close = vp3_decode_end,
3215  FF_CODEC_DECODE_CB(vp3_decode_frame),
3216  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3217  AV_CODEC_CAP_FRAME_THREADS,
3218  .flush = vp3_decode_flush,
3219  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3220  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3221  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3222 };
3223 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2272
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:268
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:64
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_clip
#define av_clip
Definition: common.h:95
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:226
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
Vp3DecodeContext::mode_code_vlc
VLC mode_code_vlc
Definition: vp3.c:285
VP3DSPContext
Definition: vp3dsp.h:25
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp3_decode_flush
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:334
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:972
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:216
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:82
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
VP4Predictor
Definition: vp3.c:160
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:187
HuffEntry::len
uint8_t len
Definition: exr.c:95
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:142
VP4Predictor::dc
int dc
Definition: vp3.c:161
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:411
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2846
PUR
#define PUR
vp3dsp.h
Vp3DecodeContext::motion_vector_vlc
VLC motion_vector_vlc
Definition: vp3.c:286
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:995
Vp3DecodeContext::superblock_run_length_vlc
VLC superblock_run_length_vlc
Definition: vp3.c:282
AVPacket::data
uint8_t * data
Definition: packet.h:374
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:43
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3189
table
static const uint16_t table[]
Definition: prosumer.c:205
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:223
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:308
FFCodec
Definition: codec_internal.h:127
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:65
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:472
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2072
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1359
Vp3DecodeContext::height
int height
Definition: vp3.c:180
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1863
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:114
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:354
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
fragment
Definition: dashdec.c:34
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:203
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:58
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+2]
Definition: vp3.c:309
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:371
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1750
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:297
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:593
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1155
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:236
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:188
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:648
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:201
fail
#define fail()
Definition: checkasm.h:134
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:86
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
GetBitContext
Definition: get_bits.h:107
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:81
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:202
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:227
Vp3DecodeContext::theora
int theora
Definition: vp3.c:178
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:613
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:340
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:276
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:87
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:260
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:149
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:988
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:524
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:134
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:544
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:143
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:221
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
s
#define s(width, name)
Definition: cbs_vp9.c:256
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:463
Vp3DecodeContext::fragment_run_length_vlc
VLC fragment_run_length_vlc
Definition: vp3.c:283
Vp3DecodeContext::vp4_mv_vlc
VLC vp4_mv_vlc[2][7]
Definition: vp3.c:287
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:404
s1
#define s1
Definition: regdef.h:38
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:173
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:386
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:70
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:69
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:66
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1646
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1304
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:96
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:46
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:540
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:83
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:436
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:212
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:896
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:186
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:420
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:85
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:107
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:204
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:228
NULL
#define NULL
Definition: coverity.c:32
Vp3DecodeContext::block_pattern_vlc
VLC block_pattern_vlc[2]
Definition: vp3.c:284
init_frames
static av_cold int init_frames(Vp3DecodeContext *s)
Definition: vp3.c:2315
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
HuffTable
Used to store optimal huffman encoding results.
Definition: mjpegenc_huffman.h:69
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:792
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1183
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:200
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
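
A minimal bit-reader sketch using get_bits1(), get_bits() and show_bits(). get_bits.h is an internal header, so this only builds inside the FFmpeg source tree; the byte values are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include "libavcodec/get_bits.h"

int main(void)
{
    static const uint8_t buf[] = { 0xB5, 0x01 };  /* 1011 0101 0000 0001 */
    GetBitContext gb;

    if (init_get_bits8(&gb, buf, sizeof(buf)) < 0)
        return 1;

    printf("first bit:   %u\n", get_bits1(&gb));     /* reads 1 bit  -> 1 */
    printf("next 3 bits: %u\n", get_bits(&gb, 3));   /* reads 3 bits -> 0b011 = 3 */
    printf("peek 4 bits: %u\n", show_bits(&gb, 4));  /* peeks without consuming */
    printf("bits left:   %d\n", get_bits_left(&gb)); /* 16 - 4 = 12 */
    return 0;
}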
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
ff_init_vlc_from_lengths
int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:328
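
A sketch of the build-then-decode pattern this helper supports, paired with get_vlc2(). The 4-entry demo_tab alphabet, the table-bits choice and the wrapper function are assumptions for illustration; the internal vlc.h/get_bits.h headers are only available inside the FFmpeg tree. Codes are derived implicitly from the lengths, taken in "left nodes first" tree order (here 0, 10, 110, 111).

#include "libavcodec/get_bits.h"
#include "libavcodec/vlc.h"

static const uint8_t demo_tab[4][2] = {
    /* { symbol, code length } */
    { 'A', 1 }, { 'B', 2 }, { 'C', 3 }, { 'D', 3 },
};

int demo_vlc_decode(void *logctx, GetBitContext *gb)
{
    VLC vlc = { 0 };
    int ret, sym;

    ret = ff_init_vlc_from_lengths(&vlc, 3 /* table bits */, 4 /* nb_codes */,
                                   (const int8_t *)&demo_tab[0][1], 2, /* lengths, stride */
                                   &demo_tab[0][0], 2, 1,              /* symbols, stride, size */
                                   0, 0, logctx);
    if (ret < 0)
        return ret;

    /* One-stage lookup is enough since no code is longer than 3 bits. */
    sym = get_vlc2(gb, vlc.table, 3, 1);

    ff_free_vlc(&vlc);
    return sym;
}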
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
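
A sketch of how a header parser might apply an aspect ratio read from the bitstream. ff_set_sar() is a lavc-internal helper; the num/den parameters, the wrapper function and the internal.h include path are assumptions.

#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"   /* assumed location of the ff_set_sar() prototype */

/* Hypothetical helper: store an aspect ratio parsed from a codec header.
 * ff_set_sar() validates the ratio before committing it to the context. */
static int apply_header_sar(AVCodecContext *avctx, int num, int den)
{
    return ff_set_sar(avctx, (AVRational){ num, den });
}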
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:178
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:261
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:144
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:194
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
update_frames
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2507
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:198
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:299
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:631
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:237
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1644
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:190
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:192
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
tab
static const uint8_t tab[16]
Definition: rka.c:668
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:220
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:268
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:71
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:513
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:301
AVPacket::size
int size
Definition: packet.h:375
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Intra DC Prediction block[y][x] dc[1] (cross-reference into the Snow bitstream notes; the full passage lives in doc/snow.txt)
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
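
A small self-contained example of the public av_frame_ref()/av_frame_unref() pair; the dimensions and pixel format are arbitrary.

#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"

int main(void)
{
    AVFrame *src = av_frame_alloc();
    AVFrame *dst = av_frame_alloc();
    int ret = -1;

    if (!src || !dst)
        goto end;

    src->format = AV_PIX_FMT_YUV420P;
    src->width  = 352;
    src->height = 288;
    if (av_frame_get_buffer(src, 0) < 0)   /* allocate refcounted buffers */
        goto end;

    ret = av_frame_ref(dst, src);          /* dst now shares src's buffers */
    if (ret < 0)
        goto end;

    av_frame_unref(dst);                   /* drop dst's reference again */
end:
    av_frame_free(&src);
    av_frame_free(&dst);
    return ret < 0;
}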
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:87
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:906
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_vp4_decoder
const FFCodec ff_vp4_decoder
vp4_get_mv
static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:895
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:146
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
VLCElem
Definition: vlc.h:27
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:331
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:257
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:233
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:196
Vp3DecodeContext::current_frame
ThreadFrame current_frame
Definition: vp3.c:184
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:191
height
#define height
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:181
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:225
offset
it's the only field you need to keep, assuming you have a context; there is some magic around this that you don't need to care about, just let it be (vf offset)
Definition: writing_filters.txt:86
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:301
version
version
Definition: libkvazaar.c:313
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:560
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1513
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:177
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:275
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:185
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:79
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1796
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:213
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:448
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:217
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:303
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:361
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:214
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2589
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:211
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:58
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
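
A minimal sketch of the public allocation helpers referenced here (av_mallocz(), av_calloc(), av_freep()); the sizes are arbitrary.

#include <stdint.h>
#include "libavutil/mem.h"

int main(void)
{
    /* Zero-initialized block, suitably aligned for any access. */
    uint8_t *buf  = av_mallocz(1024);
    /* Array allocation with overflow checking on nmemb * size. */
    int     *list = av_calloc(64, sizeof(*list));

    if (!buf || !list) {
        av_freep(&buf);
        av_freep(&list);
        return 1;
    }

    /* ... use the buffers ... */

    av_freep(&buf);    /* frees and sets the pointer to NULL */
    av_freep(&list);
    return 0;
}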
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1016
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:208
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:205
AVCodecContext::height
int height
Definition: avcodec.h:598
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:930
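
A sketch of how a frame-threaded decoder acquires a frame it intends to keep as a reference across calls (in the spirit of the golden/last frames here). threadframe.h is internal to libavcodec, so this is only meaningful inside the tree; tf->f is assumed to have already been allocated with av_frame_alloc(), as init_frames() does.

#include "libavcodec/avcodec.h"
#include "libavcodec/threadframe.h"

/* Request a buffer for a ThreadFrame and signal that the decoder will keep
 * a reference to it across calls (AV_GET_BUFFER_FLAG_REF). */
static int acquire_ref_frame(AVCodecContext *avctx, ThreadFrame *tf)
{
    return ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
}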
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:57
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:272
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:206
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:538
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:178
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:318
frame
From doc/filter_design.txt: buffered frames must be flushed immediately if a new input produces one; a filter must not call request_frame() to get more input, it should process or queue the frame it was given and leave requesting more frames to its request_frame() method or to the application. A filter with several inputs must be ready for frames arriving randomly on any input and will most likely need some kind of queuing mechanism; a limited queue that drops frames when the inputs are too unbalanced is acceptable. request_frame() is called when a frame is wanted on an output: a source should directly call filter_frame() on that output, otherwise the filter should push an already queued frame or request frames on its inputs repeatedly until at least one frame has been pushed, returning once it has at least made progress towards producing a frame
Definition: filter_design.txt:264
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:80
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:267
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
From doc/multithreading.txt: the pkt_dts and pkt_pts fields in AVFrame will work as usual. Codecs whose streams don't reset across frames will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before the referenced frame has progressed far enough. Move context setup, as well as code calling get_format(), up to before the decode process starts, and call ff_thread_finish_setup() afterwards; if some code can't be moved ...
Vp3DecodeContext::golden_frame
ThreadFrame golden_frame
Definition: vp3.c:182
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:181
BLOCK_X
#define BLOCK_X
Definition: vp3.c:647
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:89
Vp3DecodeContext
Definition: vp3.c:176
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:84
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:60
AVCodecContext
main external API structure.
Definition: avcodec.h:426
ThreadFrame
Definition: threadframe.h:27
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1908
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:683
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1950
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VLC
Definition: vlc.h:31
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:234
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1027
HuffEntry
Definition: exr.c:94
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:248
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:101
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:145
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
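
A sketch of how a Theora-style decoder splits its extradata into the three Xiph headers before parsing them. The wrapper function is hypothetical; the 42-byte first-header size mirrors what the Theora path here passes, and xiph.h is an internal libavcodec header.

#include "libavcodec/avcodec.h"
#include "libavcodec/xiph.h"

/* Split Theora extradata into identification/comment/setup headers.
 * 42 is the expected size of the first (identification) header. */
static int split_theora_headers(AVCodecContext *avctx,
                                const uint8_t *header_start[3],
                                int header_len[3])
{
    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                                  42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return AVERROR_INVALIDDATA;
    }
    return 0;
}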
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:78
VideoDSPContext
Definition: videodsp.h:40
HuffEntry::sym
uint8_t sym
Definition: vp3.c:168
Vp3DecodeContext::coeff_vlc
VLC coeff_vlc[5 *16]
Definition: vp3.c:280
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:209
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1642
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:613
Vp3DecodeContext::last_frame
ThreadFrame last_frame
Definition: vp3.c:183
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:224
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:321
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1147
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:172
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:451
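
codec_tag stores the fourcc with the least significant byte first, which is what MKTAG() (listed further down, from the public libavutil/macros.h) produces. A small example that packs and unpacks one; the tag value is arbitrary.

#include <stdio.h>
#include <stdint.h>
#include "libavutil/macros.h"   /* MKTAG */

int main(void)
{
    uint32_t tag = MKTAG('V', 'P', '3', '1');   /* arbitrary example fourcc */

    /* LSB first: reading the bytes back in order recovers "VP31". */
    printf("%c%c%c%c\n",
           (int)(tag & 0xff), (int)((tag >> 8) & 0xff),
           (int)((tag >> 16) & 0xff), (int)((tag >> 24) & 0xff));
    return 0;
}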
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:306
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
VP4Predictor::type
int type
Definition: vp3.c:162
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2327
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:235
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:56
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:219
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
d
d
Definition: ffmpeg_filter.c:156
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:180
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:274
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1104
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:238
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:346
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:76
Vp3DecodeContext::version
int version
Definition: vp3.c:179
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
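
A quick self-contained check using the public av_image_check_size(); the first dimensions are arbitrary and the second call is deliberately oversized to show the error path.

#include <stdio.h>
#include <limits.h>
#include "libavutil/imgutils.h"

int main(void)
{
    /* 0 on success, a negative AVERROR code if the image would not be
     * fully addressable in memory. */
    int ok  = av_image_check_size(1920, 1080, 0, NULL);
    int bad = av_image_check_size(UINT_MAX / 2, UINT_MAX / 2, 0, NULL);

    printf("1920x1080 -> %d\n", ok);    /* expected: 0 */
    printf("huge      -> %d\n", bad);   /* expected: negative AVERROR */
    return 0;
}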
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:230
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:193
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:311
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:207
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:258
Vp3Fragment
Definition: vp3.c:63
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:795
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:197
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:291
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:189
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:259
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:215
eob_run_table
static const struct @183 eob_run_table[7]