vp3.c
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/mem_internal.h"
41 #include "libavutil/thread.h"
42 
43 #include "avcodec.h"
44 #include "codec_internal.h"
45 #include "decode.h"
46 #include "get_bits.h"
47 #include "hpeldsp.h"
48 #include "internal.h"
49 #include "jpegquanttables.h"
50 #include "mathops.h"
51 #include "refstruct.h"
52 #include "thread.h"
53 #include "threadframe.h"
54 #include "videodsp.h"
55 #include "vp3data.h"
56 #include "vp4data.h"
57 #include "vp3dsp.h"
58 #include "xiph.h"
59 
60 #define VP3_MV_VLC_BITS 6
61 #define VP4_MV_VLC_BITS 6
62 #define SUPERBLOCK_VLC_BITS 6
63 
64 #define FRAGMENT_PIXELS 8
65 
66 // FIXME split things out into their own arrays
67 typedef struct Vp3Fragment {
68  int16_t dc;
69  uint8_t coding_method;
70  uint8_t qpi;
71 } Vp3Fragment;
72 
73 #define SB_NOT_CODED 0
74 #define SB_PARTIALLY_CODED 1
75 #define SB_FULLY_CODED 2
76 
77 // This is the maximum length of a single long bit run that can be encoded
78 // for superblock coding or block qps. Theora special-cases this to read a
79 // bit instead of flipping the current bit to allow for runs longer than 4129.
80 #define MAXIMUM_LONG_BIT_RUN 4129
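// A decoded run value of 34 is extended by 12 extra bits in the run-length
// loops below, so the longest representable run is 34 + 4095 = 4129, which
// is where this constant comes from.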
81 
82 #define MODE_INTER_NO_MV 0
83 #define MODE_INTRA 1
84 #define MODE_INTER_PLUS_MV 2
85 #define MODE_INTER_LAST_MV 3
86 #define MODE_INTER_PRIOR_LAST 4
87 #define MODE_USING_GOLDEN 5
88 #define MODE_GOLDEN_MV 6
89 #define MODE_INTER_FOURMV 7
90 #define CODING_MODE_COUNT 8
91 
92 /* special internal mode */
93 #define MODE_COPY 8
94 
95 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
96 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
97 
98 
99 /* There are 6 preset schemes, plus a free-form scheme */
100 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
101  /* scheme 1: Last motion vector dominates */
106 
107  /* scheme 2 */
112 
113  /* scheme 3 */
118 
119  /* scheme 4 */
124 
125  /* scheme 5: No motion vector dominates */
130 
131  /* scheme 6 */
136 };
137 
138 static const uint8_t hilbert_offset[16][2] = {
139  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
140  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
141  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
142  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
143 };
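/* Order in which the 16 fragments of a 4x4-fragment superblock are visited
 * (a Hilbert curve). The block-mapping setup below uses it to build
 * superblock_fragments[], and vp4_unpack_dct_coeffs() walks blocks in the
 * same order. */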
144 
145 enum {
151 };
152 
153 static const uint8_t vp4_pred_block_type_map[8] = {
162 };
163 
164 static VLCElem superblock_run_length_vlc[88]; /* version < 2 */
165 static VLCElem fragment_run_length_vlc[56]; /* version < 2 */
166 static VLCElem motion_vector_vlc[112]; /* version < 2 */
167 
168 // The VP4 tables reuse this vlc.
169 static VLCElem mode_code_vlc[24 + 2108 * CONFIG_VP4_DECODER];
170 
171 #if CONFIG_VP4_DECODER
172 static const VLCElem *vp4_mv_vlc_table[2][7]; /* version >= 2 */
173 static const VLCElem *block_pattern_vlc[2]; /* version >= 2 */
174 #endif
175 
176 typedef struct {
177  int dc;
178  int type;
179 } VP4Predictor;
180 
181 #define MIN_DEQUANT_VAL 2
182 
183 typedef struct HuffEntry {
184  uint8_t len, sym;
185 } HuffEntry;
186 
187 typedef struct HuffTable {
189  uint8_t nb_entries;
190 } HuffTable;
191 
192 typedef struct CoeffVLCs {
193  const VLCElem *vlc_tabs[80];
194  VLC vlcs[80];
195 } CoeffVLCs;
196 
197 typedef struct Vp3DecodeContext {
200  int version;
201  int width, height;
206  int keyframe;
207  uint8_t idct_permutation[64];
208  uint8_t idct_scantable[64];
212  DECLARE_ALIGNED(16, int16_t, block)[64];
216 
217  int qps[3];
218  int nqps;
219  int last_qps[3];
220 
230  unsigned char *superblock_coding;
231 
232  int macroblock_count; /* y macroblock count */
238  int yuv_macroblock_count; /* y+u+v macroblock count */
239 
243 
246  int data_offset[3];
247  uint8_t offset_x;
248  uint8_t offset_y;
250 
251  int8_t (*motion_val[2])[2];
252 
253  /* tables */
254  uint16_t coded_dc_scale_factor[2][64];
255  uint32_t coded_ac_scale_factor[64];
256  uint8_t base_matrix[384][64];
257  uint8_t qr_count[2][3];
258  uint8_t qr_size[2][3][64];
259  uint16_t qr_base[2][3][64];
260 
261  /**
262  * This is a list of all tokens in bitstream order. Reordering takes place
263  * by pulling from each level during IDCT. As a consequence, IDCT must be
264  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
265  * otherwise. The 32 different tokens with up to 12 bits of extradata are
266  * collapsed into 3 types, packed as follows:
267  * (from the low to high bits)
268  *
269  * 2 bits: type (0,1,2)
270  * 0: EOB run, 14 bits for run length (12 needed)
271  * 1: zero run, 7 bits for run length
272  * 7 bits for the next coefficient (3 needed)
273  * 2: coefficient, 14 bits (11 needed)
274  *
275  * Coefficients are signed, so are packed in the highest bits for automatic
276  * sign extension.
277  */
278  int16_t *dct_tokens[3][64];
279  int16_t *dct_tokens_base;
280 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
281 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
282 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
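/* Worked example of the packing above: a zero-run token with coeff = -3 and
 * zero_run = 5 is stored as TOKEN_ZERO_RUN(-3, 5) = (-3) * 512 + (5 << 2) + 1;
 * vp3_dequant() later recovers the run as (token >> 2) & 0x7f and the signed
 * coefficient as token >> 9. */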
283 
284  /**
285  * number of blocks that contain DCT coefficients at
286  * the given level or higher
287  */
288  int num_coded_frags[3][64];
290 
291  /* this is a list of indexes into the all_fragments array indicating
292  * which of the fragments are coded */
294 
298 
299  /**
300  * The first 16 of the following VLCs are for the dc coefficients;
301  * the others are four groups of 16 VLCs each for ac coefficients.
302  * This is a RefStruct reference to share these VLCs between threads.
303  */
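 /* Example: the DCT-coefficient unpacking functions pick vlc_tabs[dc_*_table]
  * (0..15) for DC and vlc_tabs[ac_*_table + 16 * group] for the four AC
  * coefficient groups. */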
305 
306  /* these arrays need to be on 16-byte boundaries since SSE2 operations
307  * index into them */
308  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
309 
310  /* This table contains superblock_count * 16 entries. Each set of 16
311  * numbers corresponds to the fragment indexes 0..15 of the superblock.
312  * An entry will be -1 to indicate that no entry corresponds to that
313  * index. */
315 
316  /* This is an array that indicates how a particular macroblock
317  * is coded. */
318  unsigned char *macroblock_coding;
319 
320  uint8_t *edge_emu_buffer;
321 
322  /* Huffman decode */
324 
325  uint8_t filter_limit_values[64];
327 
328  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
329 } Vp3DecodeContext;
330 
331 /************************************************************************
332  * VP3 specific functions
333  ************************************************************************/
334 
335 static av_cold void free_tables(AVCodecContext *avctx)
336 {
337  Vp3DecodeContext *s = avctx->priv_data;
338 
339  av_freep(&s->superblock_coding);
340  av_freep(&s->all_fragments);
341  av_freep(&s->nkf_coded_fragment_list);
342  av_freep(&s->kf_coded_fragment_list);
343  av_freep(&s->dct_tokens_base);
344  av_freep(&s->superblock_fragments);
345  av_freep(&s->macroblock_coding);
346  av_freep(&s->dc_pred_row);
347  av_freep(&s->motion_val[0]);
348  av_freep(&s->motion_val[1]);
349 }
350 
351 static void vp3_decode_flush(AVCodecContext *avctx)
352 {
353  Vp3DecodeContext *s = avctx->priv_data;
354 
355  if (s->golden_frame.f)
356  ff_thread_release_ext_buffer(&s->golden_frame);
357  if (s->last_frame.f)
358  ff_thread_release_ext_buffer(&s->last_frame);
359  if (s->current_frame.f)
360  ff_thread_release_ext_buffer(&s->current_frame);
361 }
362 
363 static av_cold int vp3_decode_end(AVCodecContext *avctx)
364 {
365  Vp3DecodeContext *s = avctx->priv_data;
366 
367  free_tables(avctx);
368  av_freep(&s->edge_emu_buffer);
369 
370  s->theora_tables = 0;
371 
372  /* release all frames */
373  vp3_decode_flush(avctx);
374  av_frame_free(&s->current_frame.f);
375  av_frame_free(&s->last_frame.f);
376  av_frame_free(&s->golden_frame.f);
377 
378  ff_refstruct_unref(&s->coeff_vlc);
379 
380  return 0;
381 }
382 
383 /**
384  * This function sets up all of the various block mappings:
385  * superblocks <-> fragments, macroblocks <-> fragments,
386  * superblocks <-> macroblocks
387  *
388  * @return 0 if successful; returns 1 if *anything* went wrong.
389  */
390 static int init_block_mapping(Vp3DecodeContext *s)
391 {
392  int j = 0;
393 
394  for (int plane = 0; plane < 3; plane++) {
395  int sb_width = plane ? s->c_superblock_width
396  : s->y_superblock_width;
397  int sb_height = plane ? s->c_superblock_height
398  : s->y_superblock_height;
399  int frag_width = s->fragment_width[!!plane];
400  int frag_height = s->fragment_height[!!plane];
401 
402  for (int sb_y = 0; sb_y < sb_height; sb_y++)
403  for (int sb_x = 0; sb_x < sb_width; sb_x++)
404  for (int i = 0; i < 16; i++) {
405  int x = 4 * sb_x + hilbert_offset[i][0];
406  int y = 4 * sb_y + hilbert_offset[i][1];
407 
408  if (x < frag_width && y < frag_height)
409  s->superblock_fragments[j++] = s->fragment_start[plane] +
410  y * frag_width + x;
411  else
412  s->superblock_fragments[j++] = -1;
413  }
414  }
415 
416  return 0; /* successful path out */
417 }
418 
419 /*
420  * This function sets up the dequantization tables used for a particular
421  * frame.
422  */
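/*
 * For each quality index, the base matrix value is a rounded linear
 * interpolation between the two base matrices whose qi range contains
 * s->qps[qpi]; the result is then scaled by the DC/AC scale factor
 * ((qscale * coeff) / 100 * 4) and clamped to [qmin, 4096] for DC terms
 * and for VP3/Theora streams, while VP4 AC terms use
 * (qscale * (coeff - qbias) / 100 + qbias) * 4.
 */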
423 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
424 {
425  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
426 
427  for (int inter = 0; inter < 2; inter++) {
428  for (int plane = 0; plane < 3; plane++) {
429  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
430  int sum = 0, bmi, bmj, qistart, qri;
431  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
432  sum += s->qr_size[inter][plane][qri];
433  if (s->qps[qpi] <= sum)
434  break;
435  }
436  qistart = sum - s->qr_size[inter][plane][qri];
437  bmi = s->qr_base[inter][plane][qri];
438  bmj = s->qr_base[inter][plane][qri + 1];
439  for (int i = 0; i < 64; i++) {
440  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
441  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
442  s->qr_size[inter][plane][qri]) /
443  (2 * s->qr_size[inter][plane][qri]);
444 
445  int qmin = 8 << (inter + !i);
446  int qscale = i ? ac_scale_factor : dc_scale_factor;
447  int qbias = (1 + inter) * 3;
448  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
449  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
450  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
451  }
452  /* all DC coefficients use the same quant so as not to interfere
453  * with DC prediction */
454  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
455  }
456  }
457 }
458 
459 /*
460  * This function initializes the loop filter boundary limits if the frame's
461  * quality index is different from the previous frame's.
462  *
463  * The filter_limit_values may not be larger than 127.
464  */
465 static void init_loop_filter(Vp3DecodeContext *s)
466 {
467  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
468 }
469 
470 /*
471  * This function unpacks all of the superblock/macroblock/fragment coding
472  * information from the bitstream.
473  */
474 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
475 {
476  const int superblock_starts[3] = {
477  0, s->u_superblock_start, s->v_superblock_start
478  };
479  int bit = 0;
480  int current_superblock = 0;
481  int current_run = 0;
482  int num_partial_superblocks = 0;
483 
484  int current_fragment;
485  int plane0_num_coded_frags = 0;
486 
487  if (s->keyframe) {
488  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
489  } else {
490  /* unpack the list of partially-coded superblocks */
491  bit = get_bits1(gb) ^ 1;
492  current_run = 0;
493 
494  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
495  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
496  bit = get_bits1(gb);
497  else
498  bit ^= 1;
499 
500  current_run = get_vlc2(gb, superblock_run_length_vlc,
501  SUPERBLOCK_VLC_BITS, 2);
502  if (current_run == 34)
503  current_run += get_bits(gb, 12);
504 
505  if (current_run > s->superblock_count - current_superblock) {
506  av_log(s->avctx, AV_LOG_ERROR,
507  "Invalid partially coded superblock run length\n");
508  return -1;
509  }
510 
511  memset(s->superblock_coding + current_superblock, bit, current_run);
512 
513  current_superblock += current_run;
514  if (bit)
515  num_partial_superblocks += current_run;
516  }
517 
518  /* unpack the list of fully coded superblocks if any of the blocks were
519  * not marked as partially coded in the previous step */
520  if (num_partial_superblocks < s->superblock_count) {
521  int superblocks_decoded = 0;
522 
523  current_superblock = 0;
524  bit = get_bits1(gb) ^ 1;
525  current_run = 0;
526 
527  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
528  get_bits_left(gb) > 0) {
529  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
530  bit = get_bits1(gb);
531  else
532  bit ^= 1;
533 
534  current_run = get_vlc2(gb, superblock_run_length_vlc,
535  SUPERBLOCK_VLC_BITS, 2);
536  if (current_run == 34)
537  current_run += get_bits(gb, 12);
538 
539  for (int j = 0; j < current_run; current_superblock++) {
540  if (current_superblock >= s->superblock_count) {
541  av_log(s->avctx, AV_LOG_ERROR,
542  "Invalid fully coded superblock run length\n");
543  return -1;
544  }
545 
546  /* skip any superblocks already marked as partially coded */
547  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
548  s->superblock_coding[current_superblock] = 2 * bit;
549  j++;
550  }
551  }
552  superblocks_decoded += current_run;
553  }
554  }
555 
556  /* if there were partial blocks, initialize bitstream for
557  * unpacking fragment codings */
558  if (num_partial_superblocks) {
559  current_run = 0;
560  bit = get_bits1(gb);
561  /* toggle the bit because as soon as the first run length is
562  * fetched the bit will be toggled again */
563  bit ^= 1;
564  }
565  }
566 
567  /* figure out which fragments are coded; iterate through each
568  * superblock (all planes) */
569  s->total_num_coded_frags = 0;
570  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
571 
572  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
573  : s->nkf_coded_fragment_list;
574 
575  for (int plane = 0; plane < 3; plane++) {
576  int sb_start = superblock_starts[plane];
577  int sb_end = sb_start + (plane ? s->c_superblock_count
578  : s->y_superblock_count);
579  int num_coded_frags = 0;
580 
581  if (s->keyframe) {
582  if (s->num_kf_coded_fragment[plane] == -1) {
583  for (int i = sb_start; i < sb_end; i++) {
584  /* iterate through all 16 fragments in a superblock */
585  for (int j = 0; j < 16; j++) {
586  /* if the fragment is in bounds, check its coding status */
587  current_fragment = s->superblock_fragments[i * 16 + j];
588  if (current_fragment != -1) {
589  s->coded_fragment_list[plane][num_coded_frags++] =
590  current_fragment;
591  }
592  }
593  }
594  s->num_kf_coded_fragment[plane] = num_coded_frags;
595  } else
596  num_coded_frags = s->num_kf_coded_fragment[plane];
597  } else {
598  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
599  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
600  return AVERROR_INVALIDDATA;
601  }
602  /* iterate through all 16 fragments in a superblock */
603  for (int j = 0; j < 16; j++) {
604  /* if the fragment is in bounds, check its coding status */
605  current_fragment = s->superblock_fragments[i * 16 + j];
606  if (current_fragment != -1) {
607  int coded = s->superblock_coding[i];
608 
609  if (coded == SB_PARTIALLY_CODED) {
610  /* fragment may or may not be coded; this is the case
611  * that cares about the fragment coding runs */
612  if (current_run-- == 0) {
613  bit ^= 1;
614  current_run = get_vlc2(gb, fragment_run_length_vlc, 5, 2);
615  }
616  coded = bit;
617  }
618 
619  if (coded) {
620  /* default mode; actual mode will be decoded in
621  * the next phase */
622  s->all_fragments[current_fragment].coding_method =
623  MODE_INTER_NO_MV;
624  s->coded_fragment_list[plane][num_coded_frags++] =
625  current_fragment;
626  } else {
627  /* not coded; copy this fragment from the prior frame */
628  s->all_fragments[current_fragment].coding_method =
629  MODE_COPY;
630  }
631  }
632  }
633  }
634  }
635  if (!plane)
636  plane0_num_coded_frags = num_coded_frags;
637  s->total_num_coded_frags += num_coded_frags;
638  for (int i = 0; i < 64; i++)
639  s->num_coded_frags[plane][i] = num_coded_frags;
640  if (plane < 2)
641  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
642  num_coded_frags;
643  }
644  return 0;
645 }
646 
647 #define BLOCK_X (2 * mb_x + (k & 1))
648 #define BLOCK_Y (2 * mb_y + (k >> 1))
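// k in 0..3 indexes the four luma fragments of a macroblock: bit 0 of k
// selects the column and bit 1 the row, giving a 2x2 raster order relative
// to fragment position (2 * mb_x, 2 * mb_y).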
649 
650 #if CONFIG_VP4_DECODER
651 /**
652  * @return number of blocks, or > yuv_macroblock_count on error.
653  * The return value is always >= 1.
654  */
655 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
656 {
657  int v = 1;
658  int bits;
659  while ((bits = show_bits(gb, 9)) == 0x1ff) {
660  skip_bits(gb, 9);
661  v += 256;
662  if (v > s->yuv_macroblock_count) {
663  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
664  return v;
665  }
666  }
667 #define body(n) { \
668  skip_bits(gb, 2 + n); \
669  v += (1 << n) + get_bits(gb, n); }
670 #define thresh(n) (0x200 - (0x80 >> n))
671 #define else_if(n) else if (bits < thresh(n)) body(n)
672  if (bits < 0x100) {
673  skip_bits(gb, 1);
674  } else if (bits < thresh(0)) {
675  skip_bits(gb, 2);
676  v += 1;
677  }
678  else_if(1)
679  else_if(2)
680  else_if(3)
681  else_if(4)
682  else_if(5)
683  else_if(6)
684  else body(7)
685 #undef body
686 #undef thresh
687 #undef else_if
688  return v;
689 }
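/* The count above is a unary-style prefix code followed by 0-7 extra bits:
 * prefix 0 -> 1 macroblock, 10 -> 2, 110 plus 1 bit -> 3..4, 1110 plus
 * 2 bits -> 5..8, and so on; each full 0x1ff prefix (nine 1 bits) adds
 * another 256 before the final code. */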
690 
691 static int vp4_get_block_pattern(GetBitContext *gb, int *next_block_pattern_table)
692 {
693  int v = get_vlc2(gb, block_pattern_vlc[*next_block_pattern_table], 5, 1);
694  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
695  return v + 1;
696 }
697 
698 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
699 {
700  int fragment;
701  int next_block_pattern_table;
702  int bit, current_run, has_partial;
703 
704  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
705 
706  if (s->keyframe)
707  return 0;
708 
709  has_partial = 0;
710  bit = get_bits1(gb);
711  for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
712  if (get_bits_left(gb) <= 0)
713  return AVERROR_INVALIDDATA;
714  current_run = vp4_get_mb_count(s, gb);
715  if (current_run > s->yuv_macroblock_count - i)
716  return -1;
717  memset(s->superblock_coding + i, 2 * bit, current_run);
718  bit ^= 1;
719  has_partial |= bit;
720  }
721 
722  if (has_partial) {
723  if (get_bits_left(gb) <= 0)
724  return AVERROR_INVALIDDATA;
725  bit = get_bits1(gb);
726  current_run = vp4_get_mb_count(s, gb);
727  for (int i = 0; i < s->yuv_macroblock_count; i++) {
728  if (!s->superblock_coding[i]) {
729  if (!current_run) {
730  bit ^= 1;
731  current_run = vp4_get_mb_count(s, gb);
732  }
733  s->superblock_coding[i] = bit;
734  current_run--;
735  }
736  }
737  if (current_run) /* handle situation when vp4_get_mb_count() fails */
738  return -1;
739  }
740 
741  next_block_pattern_table = 0;
742  for (int plane = 0, i = 0; plane < 3; plane++) {
743  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
744  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
745  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
746  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
747  int fragment_width = s->fragment_width[!!plane];
748  int fragment_height = s->fragment_height[!!plane];
749 
750  for (int sb_y = 0; sb_y < sb_height; sb_y++) {
751  for (int sb_x = 0; sb_x < sb_width; sb_x++) {
752  for (int j = 0; j < 4; j++) {
753  int mb_x = 2 * sb_x + (j >> 1);
754  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
755  int mb_coded, pattern, coded;
756 
757  if (mb_x >= mb_width || mb_y >= mb_height)
758  continue;
759 
760  mb_coded = s->superblock_coding[i++];
761 
762  if (mb_coded == SB_FULLY_CODED)
763  pattern = 0xF;
764  else if (mb_coded == SB_PARTIALLY_CODED)
765  pattern = vp4_get_block_pattern(gb, &next_block_pattern_table);
766  else
767  pattern = 0;
768 
769  for (int k = 0; k < 4; k++) {
770  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
771  continue;
772  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
773  coded = pattern & (8 >> k);
774  /* MODE_INTER_NO_MV is the default for coded fragments.
775  The actual method is decoded in the next phase. */
776  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
777  }
778  }
779  }
780  }
781  }
782  return 0;
783 }
784 #endif
785 
786 /*
787  * This function unpacks all the coding mode data for individual macroblocks
788  * from the bitstream.
789  */
790 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
791 {
792  int scheme;
793  int current_macroblock;
794  int current_fragment;
795  int coding_mode;
796  int custom_mode_alphabet[CODING_MODE_COUNT];
797  const int *alphabet;
798  Vp3Fragment *frag;
799 
800  if (s->keyframe) {
801  for (int i = 0; i < s->fragment_count; i++)
802  s->all_fragments[i].coding_method = MODE_INTRA;
803  } else {
804  /* fetch the mode coding scheme for this frame */
805  scheme = get_bits(gb, 3);
806 
807  /* is it a custom coding scheme? */
808  if (scheme == 0) {
809  for (int i = 0; i < 8; i++)
810  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
811  for (int i = 0; i < 8; i++)
812  custom_mode_alphabet[get_bits(gb, 3)] = i;
813  alphabet = custom_mode_alphabet;
814  } else
815  alphabet = ModeAlphabet[scheme - 1];
816 
817  /* iterate through all of the macroblocks that contain 1 or more
818  * coded fragments */
819  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
820  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
821  if (get_bits_left(gb) <= 0)
822  return -1;
823 
824  for (int j = 0; j < 4; j++) {
825  int k;
826  int mb_x = 2 * sb_x + (j >> 1);
827  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
828  current_macroblock = mb_y * s->macroblock_width + mb_x;
829 
830  if (mb_x >= s->macroblock_width ||
831  mb_y >= s->macroblock_height)
832  continue;
833 
834  /* coding modes are only stored if the macroblock has
835  * at least one luma block coded, otherwise it must be
836  * INTER_NO_MV */
837  for (k = 0; k < 4; k++) {
838  current_fragment = BLOCK_Y *
839  s->fragment_width[0] + BLOCK_X;
840  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
841  break;
842  }
843  if (k == 4) {
844  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
845  continue;
846  }
847 
848  /* mode 7 means get 3 bits for each coding mode */
849  if (scheme == 7)
850  coding_mode = get_bits(gb, 3);
851  else
852  coding_mode = alphabet[get_vlc2(gb, mode_code_vlc, 4, 2)];
853 
854  s->macroblock_coding[current_macroblock] = coding_mode;
855  for (k = 0; k < 4; k++) {
856  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
857  if (frag->coding_method != MODE_COPY)
858  frag->coding_method = coding_mode;
859  }
860 
861 #define SET_CHROMA_MODES \
862  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
863  frag[s->fragment_start[1]].coding_method = coding_mode; \
864  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
865  frag[s->fragment_start[2]].coding_method = coding_mode;
866 
867  if (s->chroma_y_shift) {
868  frag = s->all_fragments + mb_y *
869  s->fragment_width[1] + mb_x;
870  SET_CHROMA_MODES
871  } else if (s->chroma_x_shift) {
872  frag = s->all_fragments +
873  2 * mb_y * s->fragment_width[1] + mb_x;
874  for (k = 0; k < 2; k++) {
875  SET_CHROMA_MODES
876  frag += s->fragment_width[1];
877  }
878  } else {
879  for (k = 0; k < 4; k++) {
880  frag = s->all_fragments +
881  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
882  SET_CHROMA_MODES
883  }
884  }
885  }
886  }
887  }
888  }
889 
890  return 0;
891 }
892 
893 static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
894 {
895 #if CONFIG_VP4_DECODER
896  int v = get_vlc2(gb, vp4_mv_vlc_table[axis][vp4_mv_table_selector[FFABS(last_motion)]],
897  VP4_MV_VLC_BITS, 2);
898  return last_motion < 0 ? -v : v;
899 #else
900  return 0;
901 #endif
902 }
903 
904 /*
905  * This function unpacks all the motion vectors for the individual
906  * macroblocks from the bitstream.
907  */
908 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
909 {
910  int coding_mode;
911  int motion_x[4];
912  int motion_y[4];
913  int last_motion_x = 0;
914  int last_motion_y = 0;
915  int prior_last_motion_x = 0;
916  int prior_last_motion_y = 0;
917  int last_gold_motion_x = 0;
918  int last_gold_motion_y = 0;
919  int current_macroblock;
920  int current_fragment;
921  int frag;
922 
923  if (s->keyframe)
924  return 0;
925 
926  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
927  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
928 
929  /* iterate through all of the macroblocks that contain 1 or more
930  * coded fragments */
931  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
932  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
933  if (get_bits_left(gb) <= 0)
934  return -1;
935 
936  for (int j = 0; j < 4; j++) {
937  int mb_x = 2 * sb_x + (j >> 1);
938  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
939  current_macroblock = mb_y * s->macroblock_width + mb_x;
940 
941  if (mb_x >= s->macroblock_width ||
942  mb_y >= s->macroblock_height ||
943  s->macroblock_coding[current_macroblock] == MODE_COPY)
944  continue;
945 
946  switch (s->macroblock_coding[current_macroblock]) {
947  case MODE_GOLDEN_MV:
948  if (coding_mode == 2) { /* VP4 */
949  last_gold_motion_x = motion_x[0] = vp4_get_mv(gb, 0, last_gold_motion_x);
950  last_gold_motion_y = motion_y[0] = vp4_get_mv(gb, 1, last_gold_motion_y);
951  break;
952  } /* otherwise fall through */
953  case MODE_INTER_PLUS_MV:
954  /* all 6 fragments use the same motion vector */
955  if (coding_mode == 0) {
956  motion_x[0] = get_vlc2(gb, motion_vector_vlc,
957  VP3_MV_VLC_BITS, 2);
958  motion_y[0] = get_vlc2(gb, motion_vector_vlc,
959  VP3_MV_VLC_BITS, 2);
960  } else if (coding_mode == 1) {
961  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
962  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
963  } else { /* VP4 */
964  motion_x[0] = vp4_get_mv(gb, 0, last_motion_x);
965  motion_y[0] = vp4_get_mv(gb, 1, last_motion_y);
966  }
967 
968  /* vector maintenance, only on MODE_INTER_PLUS_MV */
969  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
970  prior_last_motion_x = last_motion_x;
971  prior_last_motion_y = last_motion_y;
972  last_motion_x = motion_x[0];
973  last_motion_y = motion_y[0];
974  }
975  break;
976 
977  case MODE_INTER_FOURMV:
978  /* vector maintenance */
979  prior_last_motion_x = last_motion_x;
980  prior_last_motion_y = last_motion_y;
981 
982  /* fetch 4 vectors from the bitstream, one for each
983  * Y fragment, then average for the C fragment vectors */
984  for (int k = 0; k < 4; k++) {
985  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
986  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
987  if (coding_mode == 0) {
988  motion_x[k] = get_vlc2(gb, motion_vector_vlc,
989  VP3_MV_VLC_BITS, 2);
990  motion_y[k] = get_vlc2(gb, motion_vector_vlc,
991  VP3_MV_VLC_BITS, 2);
992  } else if (coding_mode == 1) {
993  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
994  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
995  } else { /* VP4 */
996  motion_x[k] = vp4_get_mv(gb, 0, prior_last_motion_x);
997  motion_y[k] = vp4_get_mv(gb, 1, prior_last_motion_y);
998  }
999  last_motion_x = motion_x[k];
1000  last_motion_y = motion_y[k];
1001  } else {
1002  motion_x[k] = 0;
1003  motion_y[k] = 0;
1004  }
1005  }
1006  break;
1007 
1008  case MODE_INTER_LAST_MV:
1009  /* all 6 fragments use the last motion vector */
1010  motion_x[0] = last_motion_x;
1011  motion_y[0] = last_motion_y;
1012 
1013  /* no vector maintenance (last vector remains the
1014  * last vector) */
1015  break;
1016 
1017  case MODE_INTER_PRIOR_LAST:
1018  /* all 6 fragments use the motion vector prior to the
1019  * last motion vector */
1020  motion_x[0] = prior_last_motion_x;
1021  motion_y[0] = prior_last_motion_y;
1022 
1023  /* vector maintenance */
1024  prior_last_motion_x = last_motion_x;
1025  prior_last_motion_y = last_motion_y;
1026  last_motion_x = motion_x[0];
1027  last_motion_y = motion_y[0];
1028  break;
1029 
1030  default:
1031  /* covers intra, inter without MV, golden without MV */
1032  motion_x[0] = 0;
1033  motion_y[0] = 0;
1034 
1035  /* no vector maintenance */
1036  break;
1037  }
1038 
1039  /* assign the motion vectors to the correct fragments */
1040  for (int k = 0; k < 4; k++) {
1041  current_fragment =
1042  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1043  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1044  s->motion_val[0][current_fragment][0] = motion_x[k];
1045  s->motion_val[0][current_fragment][1] = motion_y[k];
1046  } else {
1047  s->motion_val[0][current_fragment][0] = motion_x[0];
1048  s->motion_val[0][current_fragment][1] = motion_y[0];
1049  }
1050  }
1051 
1052  if (s->chroma_y_shift) {
1053  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1054  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1055  motion_x[2] + motion_x[3], 2);
1056  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1057  motion_y[2] + motion_y[3], 2);
1058  }
1059  if (s->version <= 2) {
1060  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1061  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1062  }
1063  frag = mb_y * s->fragment_width[1] + mb_x;
1064  s->motion_val[1][frag][0] = motion_x[0];
1065  s->motion_val[1][frag][1] = motion_y[0];
1066  } else if (s->chroma_x_shift) {
1067  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1068  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1069  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1070  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1071  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1072  } else {
1073  motion_x[1] = motion_x[0];
1074  motion_y[1] = motion_y[0];
1075  }
1076  if (s->version <= 2) {
1077  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1078  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1079  }
1080  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1081  for (int k = 0; k < 2; k++) {
1082  s->motion_val[1][frag][0] = motion_x[k];
1083  s->motion_val[1][frag][1] = motion_y[k];
1084  frag += s->fragment_width[1];
1085  }
1086  } else {
1087  for (int k = 0; k < 4; k++) {
1088  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1089  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1090  s->motion_val[1][frag][0] = motion_x[k];
1091  s->motion_val[1][frag][1] = motion_y[k];
1092  } else {
1093  s->motion_val[1][frag][0] = motion_x[0];
1094  s->motion_val[1][frag][1] = motion_y[0];
1095  }
1096  }
1097  }
1098  }
1099  }
1100  }
1101 
1102  return 0;
1103 }
1104 
1105 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
1106 {
1107  int num_blocks = s->total_num_coded_frags;
1108 
1109  for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1110  int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
1111  int bit, run_length;
1112 
1113  bit = get_bits1(gb) ^ 1;
1114  run_length = 0;
1115 
1116  do {
1117  if (run_length == MAXIMUM_LONG_BIT_RUN)
1118  bit = get_bits1(gb);
1119  else
1120  bit ^= 1;
1121 
1122  run_length = get_vlc2(gb, superblock_run_length_vlc,
1123  SUPERBLOCK_VLC_BITS, 2);
1124  if (run_length == 34)
1125  run_length += get_bits(gb, 12);
1126  blocks_decoded += run_length;
1127 
1128  if (!bit)
1129  num_blocks_at_qpi += run_length;
1130 
1131  for (int j = 0; j < run_length; i++) {
1132  if (i >= s->total_num_coded_frags)
1133  return -1;
1134 
1135  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1136  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1137  j++;
1138  }
1139  }
1140  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1141 
1142  num_blocks -= num_blocks_at_qpi;
1143  }
1144 
1145  return 0;
1146 }
1147 
1148 static inline int get_eob_run(GetBitContext *gb, int token)
1149 {
1150  int v = eob_run_table[token].base;
1151  if (eob_run_table[token].bits)
1152  v += get_bits(gb, eob_run_table[token].bits);
1153  return v;
1154 }
1155 
1156 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1157 {
1158  int bits_to_get, zero_run;
1159 
1160  bits_to_get = coeff_get_bits[token];
1161  if (bits_to_get)
1162  bits_to_get = get_bits(gb, bits_to_get);
1163  *coeff = coeff_tables[token][bits_to_get];
1164 
1165  zero_run = zero_run_base[token];
1166  if (zero_run_get_bits[token])
1167  zero_run += get_bits(gb, zero_run_get_bits[token]);
1168 
1169  return zero_run;
1170 }
1171 
1172 /*
1173  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1174  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1175  * data. This function unpacks all the VLCs for either the Y plane or both
1176  * C planes, and is called for DC coefficients or different AC coefficient
1177  * levels (since different coefficient types require different VLC tables).
1178  *
1179  * This function returns a residual eob run. E.g., if a particular token gave
1180  * instructions to EOB the next 5 fragments and there were only 2 fragments
1181  * left in the current fragment range, 3 would be returned so that it could
1182  * be passed into the next call to this same function.
1183  */
1184 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1185  const VLCElem *vlc_table, int coeff_index,
1186  int plane,
1187  int eob_run)
1188 {
1189  int j = 0;
1190  int token;
1191  int zero_run = 0;
1192  int16_t coeff = 0;
1193  int blocks_ended;
1194  int coeff_i = 0;
1195  int num_coeffs = s->num_coded_frags[plane][coeff_index];
1196  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1197 
1198  /* local references to structure members to avoid repeated dereferences */
1199  const int *coded_fragment_list = s->coded_fragment_list[plane];
1200  Vp3Fragment *all_fragments = s->all_fragments;
1201 
1202  if (num_coeffs < 0) {
1203  av_log(s->avctx, AV_LOG_ERROR,
1204  "Invalid number of coefficients at level %d\n", coeff_index);
1205  return AVERROR_INVALIDDATA;
1206  }
1207 
1208  if (eob_run > num_coeffs) {
1209  coeff_i =
1210  blocks_ended = num_coeffs;
1211  eob_run -= num_coeffs;
1212  } else {
1213  coeff_i =
1214  blocks_ended = eob_run;
1215  eob_run = 0;
1216  }
1217 
1218  // insert fake EOB token to cover the split between planes or zzi
1219  if (blocks_ended)
1220  dct_tokens[j++] = blocks_ended << 2;
1221 
1222  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
1223  /* decode a VLC into a token */
1224  token = get_vlc2(gb, vlc_table, 11, 3);
1225  /* use the token to get a zero run, a coefficient, and an eob run */
1226  if ((unsigned) token <= 6U) {
1227  eob_run = get_eob_run(gb, token);
1228  if (!eob_run)
1229  eob_run = INT_MAX;
1230 
1231  // record only the number of blocks ended in this plane,
1232  // any spill will be recorded in the next plane.
1233  if (eob_run > num_coeffs - coeff_i) {
1234  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1235  blocks_ended += num_coeffs - coeff_i;
1236  eob_run -= num_coeffs - coeff_i;
1237  coeff_i = num_coeffs;
1238  } else {
1239  dct_tokens[j++] = TOKEN_EOB(eob_run);
1240  blocks_ended += eob_run;
1241  coeff_i += eob_run;
1242  eob_run = 0;
1243  }
1244  } else if (token >= 0) {
1245  zero_run = get_coeff(gb, token, &coeff);
1246 
1247  if (zero_run) {
1248  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
1249  } else {
1250  // Save DC into the fragment structure. DC prediction is
1251  // done in raster order, so the actual DC can't be in with
1252  // other tokens. We still need the token in dct_tokens[]
1253  // however, or else the structure collapses on itself.
1254  if (!coeff_index)
1255  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1256 
1257  dct_tokens[j++] = TOKEN_COEFF(coeff);
1258  }
1259 
1260  if (coeff_index + zero_run > 64) {
1261  av_log(s->avctx, AV_LOG_DEBUG,
1262  "Invalid zero run of %d with %d coeffs left\n",
1263  zero_run, 64 - coeff_index);
1264  zero_run = 64 - coeff_index;
1265  }
1266 
1267  // zero runs code multiple coefficients,
1268  // so don't try to decode coeffs for those higher levels
1269  for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1270  s->num_coded_frags[plane][i]--;
1271  coeff_i++;
1272  } else {
1273  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1274  return -1;
1275  }
1276  }
1277 
1278  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1279  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1280 
1281  // decrement the number of blocks that have higher coefficients for each
1282  // EOB run at this level
1283  if (blocks_ended)
1284  for (int i = coeff_index + 1; i < 64; i++)
1285  s->num_coded_frags[plane][i] -= blocks_ended;
1286 
1287  // setup the next buffer
1288  if (plane < 2)
1289  s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1290  else if (coeff_index < 63)
1291  s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1292 
1293  return eob_run;
1294 }
1295 
1296 static void reverse_dc_prediction(Vp3DecodeContext *s,
1297  int first_fragment,
1298  int fragment_width,
1299  int fragment_height);
1300 /*
1301  * This function unpacks all of the DCT coefficient data from the
1302  * bitstream.
1303  */
1304 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1305 {
1306  const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
1307  int dc_y_table;
1308  int dc_c_table;
1309  int ac_y_table;
1310  int ac_c_table;
1311  int residual_eob_run = 0;
1312  const VLCElem *y_tables[64], *c_tables[64];
1313 
1314  s->dct_tokens[0][0] = s->dct_tokens_base;
1315 
1316  if (get_bits_left(gb) < 16)
1317  return AVERROR_INVALIDDATA;
1318 
1319  /* fetch the DC table indexes */
1320  dc_y_table = get_bits(gb, 4);
1321  dc_c_table = get_bits(gb, 4);
1322 
1323  /* unpack the Y plane DC coefficients */
1324  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_y_table], 0,
1325  0, residual_eob_run);
1326  if (residual_eob_run < 0)
1327  return residual_eob_run;
1328  if (get_bits_left(gb) < 8)
1329  return AVERROR_INVALIDDATA;
1330 
1331  /* reverse prediction of the Y-plane DC coefficients */
1332  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1333 
1334  /* unpack the C plane DC coefficients */
1335  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
1336  1, residual_eob_run);
1337  if (residual_eob_run < 0)
1338  return residual_eob_run;
1339  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
1340  2, residual_eob_run);
1341  if (residual_eob_run < 0)
1342  return residual_eob_run;
1343 
1344  /* reverse prediction of the C-plane DC coefficients */
1345  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1346  reverse_dc_prediction(s, s->fragment_start[1],
1347  s->fragment_width[1], s->fragment_height[1]);
1348  reverse_dc_prediction(s, s->fragment_start[2],
1349  s->fragment_width[1], s->fragment_height[1]);
1350  }
1351 
1352  if (get_bits_left(gb) < 8)
1353  return AVERROR_INVALIDDATA;
1354  /* fetch the AC table indexes */
1355  ac_y_table = get_bits(gb, 4);
1356  ac_c_table = get_bits(gb, 4);
1357 
1358  /* build tables of AC VLC tables */
1359  for (int i = 1; i <= 5; i++) {
1360  /* AC VLC table group 1 */
1361  y_tables[i] = coeff_vlc[ac_y_table + 16];
1362  c_tables[i] = coeff_vlc[ac_c_table + 16];
1363  }
1364  for (int i = 6; i <= 14; i++) {
1365  /* AC VLC table group 2 */
1366  y_tables[i] = coeff_vlc[ac_y_table + 32];
1367  c_tables[i] = coeff_vlc[ac_c_table + 32];
1368  }
1369  for (int i = 15; i <= 27; i++) {
1370  /* AC VLC table group 3 */
1371  y_tables[i] = coeff_vlc[ac_y_table + 48];
1372  c_tables[i] = coeff_vlc[ac_c_table + 48];
1373  }
1374  for (int i = 28; i <= 63; i++) {
1375  /* AC VLC table group 4 */
1376  y_tables[i] = coeff_vlc[ac_y_table + 64];
1377  c_tables[i] = coeff_vlc[ac_c_table + 64];
1378  }
1379 
1380  /* decode all AC coefficients */
1381  for (int i = 1; i <= 63; i++) {
1382  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1383  0, residual_eob_run);
1384  if (residual_eob_run < 0)
1385  return residual_eob_run;
1386 
1387  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1388  1, residual_eob_run);
1389  if (residual_eob_run < 0)
1390  return residual_eob_run;
1391  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1392  2, residual_eob_run);
1393  if (residual_eob_run < 0)
1394  return residual_eob_run;
1395  }
1396 
1397  return 0;
1398 }
1399 
1400 #if CONFIG_VP4_DECODER
1401 /**
1402  * eob_tracker[] is used instead of TOKEN_EOB(value); a dummy TOKEN_EOB(0)
1403  * value is used to make vp3_dequant() work.
1404  *
1405  * @return < 0 on error
1406  */
1407 static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1408  const VLCElem *const vlc_tables[64],
1409  int plane, int eob_tracker[64], int fragment)
1410 {
1411  int token;
1412  int zero_run = 0;
1413  int16_t coeff = 0;
1414  int coeff_i = 0;
1415  int eob_run;
1416 
1417  while (!eob_tracker[coeff_i]) {
1418  if (get_bits_left(gb) < 1)
1419  return AVERROR_INVALIDDATA;
1420 
1421  token = get_vlc2(gb, vlc_tables[coeff_i], 11, 3);
1422 
1423  /* use the token to get a zero run, a coefficient, and an eob run */
1424  if ((unsigned) token <= 6U) {
1425  eob_run = get_eob_run(gb, token);
1426  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1427  eob_tracker[coeff_i] = eob_run - 1;
1428  return 0;
1429  } else if (token >= 0) {
1430  zero_run = get_coeff(gb, token, &coeff);
1431 
1432  if (zero_run) {
1433  if (coeff_i + zero_run > 64) {
1434  av_log(s->avctx, AV_LOG_DEBUG,
1435  "Invalid zero run of %d with %d coeffs left\n",
1436  zero_run, 64 - coeff_i);
1437  zero_run = 64 - coeff_i;
1438  }
1439  *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
1440  coeff_i += zero_run;
1441  } else {
1442  if (!coeff_i)
1443  s->all_fragments[fragment].dc = coeff;
1444 
1445  *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
1446  }
1447  coeff_i++;
1448  if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
1449  return 0; /* stop */
1450  } else {
1451  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1452  return -1;
1453  }
1454  }
1455  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1456  eob_tracker[coeff_i]--;
1457  return 0;
1458 }
1459 
1460 static void vp4_dc_predictor_reset(VP4Predictor *p)
1461 {
1462  p->dc = 0;
1463  p->type = VP4_DC_UNDEFINED;
1464 }
1465 
1466 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1467 {
1468  for (int i = 0; i < 4; i++)
1469  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1470 
1471  for (int j = 1; j < 5; j++)
1472  for (int i = 0; i < 4; i++)
1473  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1474 }
1475 
1476 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1477 {
1478  for (int i = 0; i < 4; i++)
1479  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1480 
1481  for (int i = 1; i < 5; i++)
1482  dc_pred[i][0] = dc_pred[i][4];
1483 }
1484 
1485 /* note: dc_pred points to the current block */
1486 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1487 {
1488  int count = 0;
1489  int dc = 0;
1490 
1491  if (dc_pred[-6].type == type) {
1492  dc += dc_pred[-6].dc;
1493  count++;
1494  }
1495 
1496  if (dc_pred[6].type == type) {
1497  dc += dc_pred[6].dc;
1498  count++;
1499  }
1500 
1501  if (count != 2 && dc_pred[-1].type == type) {
1502  dc += dc_pred[-1].dc;
1503  count++;
1504  }
1505 
1506  if (count != 2 && dc_pred[1].type == type) {
1507  dc += dc_pred[1].dc;
1508  count++;
1509  }
1510 
1511  /* using division instead of shift to correctly handle negative values */
1512  return count == 2 ? dc / 2 : last_dc[type];
1513 }
1514 
1515 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1516 {
1517  int16_t *base = s->dct_tokens_base;
1518  for (int plane = 0; plane < 3; plane++) {
1519  for (int i = 0; i < 64; i++) {
1520  s->dct_tokens[plane][i] = base;
1521  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1522  }
1523  }
1524 }
1525 
1526 static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1527 {
1528  const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
1529  int dc_y_table;
1530  int dc_c_table;
1531  int ac_y_table;
1532  int ac_c_table;
1533  const VLCElem *tables[2][64];
1534  int eob_tracker[64];
1535  VP4Predictor dc_pred[6][6];
1536  int last_dc[NB_VP4_DC_TYPES];
1537 
1538  if (get_bits_left(gb) < 16)
1539  return AVERROR_INVALIDDATA;
1540 
1541  /* fetch the DC table indexes */
1542  dc_y_table = get_bits(gb, 4);
1543  dc_c_table = get_bits(gb, 4);
1544 
1545  ac_y_table = get_bits(gb, 4);
1546  ac_c_table = get_bits(gb, 4);
1547 
1548  /* build tables of DC/AC VLC tables */
1549 
1550  /* DC table group */
1551  tables[0][0] = coeff_vlc[dc_y_table];
1552  tables[1][0] = coeff_vlc[dc_c_table];
1553  for (int i = 1; i <= 5; i++) {
1554  /* AC VLC table group 1 */
1555  tables[0][i] = coeff_vlc[ac_y_table + 16];
1556  tables[1][i] = coeff_vlc[ac_c_table + 16];
1557  }
1558  for (int i = 6; i <= 14; i++) {
1559  /* AC VLC table group 2 */
1560  tables[0][i] = coeff_vlc[ac_y_table + 32];
1561  tables[1][i] = coeff_vlc[ac_c_table + 32];
1562  }
1563  for (int i = 15; i <= 27; i++) {
1564  /* AC VLC table group 3 */
1565  tables[0][i] = coeff_vlc[ac_y_table + 48];
1566  tables[1][i] = coeff_vlc[ac_c_table + 48];
1567  }
1568  for (int i = 28; i <= 63; i++) {
1569  /* AC VLC table group 4 */
1570  tables[0][i] = coeff_vlc[ac_y_table + 64];
1571  tables[1][i] = coeff_vlc[ac_c_table + 64];
1572  }
1573 
1574  vp4_set_tokens_base(s);
1575 
1576  memset(last_dc, 0, sizeof(last_dc));
1577 
1578  for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
1579  memset(eob_tracker, 0, sizeof(eob_tracker));
1580 
1581  /* initialise dc prediction */
1582  for (int i = 0; i < s->fragment_width[!!plane]; i++)
1583  vp4_dc_predictor_reset(&s->dc_pred_row[i]);
1584 
1585  for (int j = 0; j < 6; j++)
1586  for (int i = 0; i < 6; i++)
1587  vp4_dc_predictor_reset(&dc_pred[j][i]);
1588 
1589  for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
1590  for (int sb_x = 0; sb_x *4 < s->fragment_width[!!plane]; sb_x++) {
1591  vp4_dc_pred_before(s, dc_pred, sb_x);
1592  for (int j = 0; j < 16; j++) {
1593  int hx = hilbert_offset[j][0];
1594  int hy = hilbert_offset[j][1];
1595  int x = 4 * sb_x + hx;
1596  int y = 4 * sb_y + hy;
1597  VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
1598  int fragment, dc_block_type;
1599 
1600  if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
1601  continue;
1602 
1603  fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;
1604 
1605  if (s->all_fragments[fragment].coding_method == MODE_COPY)
1606  continue;
1607 
1608  if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1609  return -1;
1610 
1611  dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];
1612 
1613  s->all_fragments[fragment].dc +=
1614  vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1615 
1616  this_dc_pred->type = dc_block_type;
1617  this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
1618  }
1619  vp4_dc_pred_after(s, dc_pred, sb_x);
1620  }
1621  }
1622  }
1623 
1624  vp4_set_tokens_base(s);
1625 
1626  return 0;
1627 }
1628 #endif
1629 
1630 /*
1631  * This function reverses the DC prediction for each coded fragment in
1632  * the frame. Much of this function is adapted directly from the original
1633  * VP3 source code.
1634  */
1635 #define COMPATIBLE_FRAME(x) \
1636  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1637 #define DC_COEFF(u) s->all_fragments[u].dc
1638 
1639 static void reverse_dc_prediction(Vp3DecodeContext *s,
1640  int first_fragment,
1641  int fragment_width,
1642  int fragment_height)
1643 {
1644 #define PUL 8
1645 #define PU 4
1646 #define PUR 2
1647 #define PL 1
1648 
1649  int i = first_fragment;
1650 
1651  int predicted_dc;
1652 
1653  /* DC values for the left, up-left, up, and up-right fragments */
1654  int vl, vul, vu, vur;
1655 
1656  /* indexes for the left, up-left, up, and up-right fragments */
1657  int l, ul, u, ur;
1658 
1659  /*
1660  * The 4 fields mean:
1661  * 0: up-left multiplier
1662  * 1: up multiplier
1663  * 2: up-right multiplier
1664  * 3: left multiplier
1665  */
1666  static const int predictor_transform[16][4] = {
1667  { 0, 0, 0, 0 },
1668  { 0, 0, 0, 128 }, // PL
1669  { 0, 0, 128, 0 }, // PUR
1670  { 0, 0, 53, 75 }, // PUR|PL
1671  { 0, 128, 0, 0 }, // PU
1672  { 0, 64, 0, 64 }, // PU |PL
1673  { 0, 128, 0, 0 }, // PU |PUR
1674  { 0, 0, 53, 75 }, // PU |PUR|PL
1675  { 128, 0, 0, 0 }, // PUL
1676  { 0, 0, 0, 128 }, // PUL|PL
1677  { 64, 0, 64, 0 }, // PUL|PUR
1678  { 0, 0, 53, 75 }, // PUL|PUR|PL
1679  { 0, 128, 0, 0 }, // PUL|PU
1680  { -104, 116, 0, 116 }, // PUL|PU |PL
1681  { 24, 80, 24, 0 }, // PUL|PU |PUR
1682  { -104, 116, 0, 116 } // PUL|PU |PUR|PL
1683  };
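 /* Example: transform == (PUL | PU | PL) == 13 selects { -104, 116, 0, 116 },
  * i.e. predicted_dc = (-104 * vul + 116 * vu + 116 * vl) / 128; the
  * outranging check below replaces the prediction with vu, vl or vul
  * (tried in that order) if it differs from that value by more than 128. */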
1684 
1685  /* This table shows which types of blocks can use other blocks for
1686  * prediction. For example, INTRA is the only mode in this table to
1687  * have a frame number of 0. That means INTRA blocks can only predict
1688  * from other INTRA blocks. There are 2 golden frame coding types;
1689  * blocks encoded in these modes can only predict from other blocks
1690  * that were encoded with one of these 2 modes. */
1691  static const unsigned char compatible_frame[9] = {
1692  1, /* MODE_INTER_NO_MV */
1693  0, /* MODE_INTRA */
1694  1, /* MODE_INTER_PLUS_MV */
1695  1, /* MODE_INTER_LAST_MV */
1696  1, /* MODE_INTER_PRIOR_LAST */
1697  2, /* MODE_USING_GOLDEN */
1698  2, /* MODE_GOLDEN_MV */
1699  1, /* MODE_INTER_FOURMV */
1700  3 /* MODE_COPY */
1701  };
1702  int current_frame_type;
1703 
1704  /* there is a last DC predictor for each of the 3 frame types */
1705  short last_dc[3];
1706 
1707  int transform = 0;
1708 
1709  vul =
1710  vu =
1711  vur =
1712  vl = 0;
1713  last_dc[0] =
1714  last_dc[1] =
1715  last_dc[2] = 0;
1716 
1717  /* for each fragment row... */
1718  for (int y = 0; y < fragment_height; y++) {
1719  /* for each fragment in a row... */
1720  for (int x = 0; x < fragment_width; x++, i++) {
1721 
1722  /* reverse prediction if this block was coded */
1723  if (s->all_fragments[i].coding_method != MODE_COPY) {
1724  current_frame_type =
1725  compatible_frame[s->all_fragments[i].coding_method];
1726 
1727  transform = 0;
1728  if (x) {
1729  l = i - 1;
1730  vl = DC_COEFF(l);
1731  if (COMPATIBLE_FRAME(l))
1732  transform |= PL;
1733  }
1734  if (y) {
1735  u = i - fragment_width;
1736  vu = DC_COEFF(u);
1737  if (COMPATIBLE_FRAME(u))
1738  transform |= PU;
1739  if (x) {
1740  ul = i - fragment_width - 1;
1741  vul = DC_COEFF(ul);
1742  if (COMPATIBLE_FRAME(ul))
1743  transform |= PUL;
1744  }
1745  if (x + 1 < fragment_width) {
1746  ur = i - fragment_width + 1;
1747  vur = DC_COEFF(ur);
1748  if (COMPATIBLE_FRAME(ur))
1749  transform |= PUR;
1750  }
1751  }
1752 
1753  if (transform == 0) {
1754  /* if there were no fragments to predict from, use last
1755  * DC saved */
1756  predicted_dc = last_dc[current_frame_type];
1757  } else {
1758  /* apply the appropriate predictor transform */
1759  predicted_dc =
1760  (predictor_transform[transform][0] * vul) +
1761  (predictor_transform[transform][1] * vu) +
1762  (predictor_transform[transform][2] * vur) +
1763  (predictor_transform[transform][3] * vl);
1764 
1765  predicted_dc /= 128;
1766 
1767  /* check for outranging on the [ul u l] and
1768  * [ul u ur l] predictors */
1769  if ((transform == 15) || (transform == 13)) {
1770  if (FFABS(predicted_dc - vu) > 128)
1771  predicted_dc = vu;
1772  else if (FFABS(predicted_dc - vl) > 128)
1773  predicted_dc = vl;
1774  else if (FFABS(predicted_dc - vul) > 128)
1775  predicted_dc = vul;
1776  }
1777  }
1778 
1779  /* at long last, apply the predictor */
1780  DC_COEFF(i) += predicted_dc;
1781  /* save the DC */
1782  last_dc[current_frame_type] = DC_COEFF(i);
1783  }
1784  }
1785  }
1786 }
1787 
1788 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1789  int ystart, int yend)
1790 {
1791  int *bounding_values = s->bounding_values_array + 127;
1792 
1793  int width = s->fragment_width[!!plane];
1794  int height = s->fragment_height[!!plane];
1795  int fragment = s->fragment_start[plane] + ystart * width;
1796  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1797  uint8_t *plane_data = s->current_frame.f->data[plane];
1798  if (!s->flipped_image)
1799  stride = -stride;
1800  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1801 
1802  for (int y = ystart; y < yend; y++) {
1803  for (int x = 0; x < width; x++) {
1804  /* This code basically just deblocks on the edges of coded blocks.
1805  * However, it has to be much more complicated because of the
1806  * brain damaged deblock ordering used in VP3/Theora. Order matters
1807  * because some pixels get filtered twice. */
1808  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1809  /* do not perform left edge filter for left columns frags */
1810  if (x > 0) {
1811  s->vp3dsp.h_loop_filter(
1812  plane_data + 8 * x,
1813  stride, bounding_values);
1814  }
1815 
1816  /* do not perform top edge filter for top row fragments */
1817  if (y > 0) {
1818  s->vp3dsp.v_loop_filter(
1819  plane_data + 8 * x,
1820  stride, bounding_values);
1821  }
1822 
1823  /* do not perform right edge filter for right column
1824  * fragments or if right fragment neighbor is also coded
1825  * in this frame (it will be filtered in next iteration) */
1826  if ((x < width - 1) &&
1827  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1828  s->vp3dsp.h_loop_filter(
1829  plane_data + 8 * x + 8,
1830  stride, bounding_values);
1831  }
1832 
1833  /* do not perform bottom edge filter for bottom row
1834  * fragments or if bottom fragment neighbor is also coded
1835  * in this frame (it will be filtered in the next row) */
1836  if ((y < height - 1) &&
1837  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1838  s->vp3dsp.v_loop_filter(
1839  plane_data + 8 * x + 8 * stride,
1840  stride, bounding_values);
1841  }
1842  }
1843 
1844  fragment++;
1845  }
1846  plane_data += 8 * stride;
1847  }
1848 }
1849 
1850 /**
1851  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1852  * for the next block in coding order
1853  */
1854 static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
1855  int plane, int inter, int16_t block[64])
1856 {
1857  const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1858  const uint8_t *perm = s->idct_scantable;
1859  int i = 0;
1860 
1861  do {
1862  int token = *s->dct_tokens[plane][i];
1863  switch (token & 3) {
1864  case 0: // EOB
1865  if (--token < 4) // 0-3 are token types so the EOB run must now be 0
1866  s->dct_tokens[plane][i]++;
1867  else
1868  *s->dct_tokens[plane][i] = token & ~3;
1869  goto end;
1870  case 1: // zero run
1871  s->dct_tokens[plane][i]++;
1872  i += (token >> 2) & 0x7f;
1873  if (i > 63) {
1874  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1875  return i;
1876  }
1877  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1878  i++;
1879  break;
1880  case 2: // coeff
1881  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1882  s->dct_tokens[plane][i++]++;
1883  break;
1884  default: // shouldn't happen
1885  return i;
1886  }
1887  } while (i < 64);
1888  // return value is expected to be a valid level
1889  i--;
1890 end:
1891  // the actual DC+prediction is in the fragment structure
1892  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1893  return i;
1894 }
1895 
1896 /**
1897  * called when all pixels up to row y are complete
1898  */
1899 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1900 {
1901  int h, cy;
1902  int offset[AV_NUM_DATA_POINTERS];
1903 
1904  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1905  int y_flipped = s->flipped_image ? s->height - y : y;
1906 
1907  /* At the end of the frame, report INT_MAX instead of the height of
1908  * the frame. This makes the other threads' ff_thread_await_progress()
1909  * calls cheaper, because they don't have to clip their values. */
1910  ff_thread_report_progress(&s->current_frame,
1911  y_flipped == s->height ? INT_MAX
1912  : y_flipped - 1,
1913  0);
1914  }
1915 
1916  if (!s->avctx->draw_horiz_band)
1917  return;
1918 
1919  h = y - s->last_slice_end;
1920  s->last_slice_end = y;
1921  y -= h;
1922 
1923  if (!s->flipped_image)
1924  y = s->height - y - h;
1925 
1926  cy = y >> s->chroma_y_shift;
1927  offset[0] = s->current_frame.f->linesize[0] * y;
1928  offset[1] = s->current_frame.f->linesize[1] * cy;
1929  offset[2] = s->current_frame.f->linesize[2] * cy;
1930  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
1931  offset[i] = 0;
1932 
1933  emms_c();
1934  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1935 }
1936 
1937 /**
1938  * Wait for the reference frame of the current fragment.
1939  * The progress value is in luma pixel rows.
1940  */
1941 static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment,
1942  int motion_y, int y)
1943 {
1944  const ThreadFrame *ref_frame;
1945  int ref_row;
1946  int border = motion_y & 1;
1947 
1948  if (fragment->coding_method == MODE_USING_GOLDEN ||
1949  fragment->coding_method == MODE_GOLDEN_MV)
1950  ref_frame = &s->golden_frame;
1951  else
1952  ref_frame = &s->last_frame;
1953 
1954  ref_row = y + (motion_y >> 1);
1955  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
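 /* The row to wait for is the bottom of the referenced 8-pixel-tall
  * fragment, plus one extra row when a half-pel vertical vector needs
  * interpolation; the FFABS/FFMAX combination keeps the value non-negative
  * for vectors that point above the top of the frame. */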
1956 
1957  ff_thread_await_progress(ref_frame, ref_row, 0);
1958 }
1959 
1960 #if CONFIG_VP4_DECODER
1961 /**
1962  * @return non-zero if temp (edge_emu_buffer) was populated
1963  */
1964 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1965  const uint8_t *motion_source, ptrdiff_t stride,
1966  int src_x, int src_y, uint8_t *temp)
1967 {
1968  int motion_shift = plane ? 4 : 2;
1969  int subpel_mask = plane ? 3 : 1;
1970  int *bounding_values = s->bounding_values_array + 127;
1971 
1972  int x, y;
1973  int x2, y2;
1974  int x_subpel, y_subpel;
1975  int x_offset, y_offset;
1976 
1977  int block_width = plane ? 8 : 16;
1978  int plane_width = s->width >> (plane && s->chroma_x_shift);
1979  int plane_height = s->height >> (plane && s->chroma_y_shift);
1980 
1981 #define loop_stride 12
1982  uint8_t loop[12 * loop_stride];
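 /* Scratch area for the pre-MC edge filter: a 12x12 neighbourhood around the
  * (sub-pel adjusted) block corner is fetched with edge emulation, the
  * relevant block edges are loop filtered in place, and the 9x9 region needed
  * for half-pel motion compensation of the 8x8 fragment is then copied out to
  * temp (see the memcpy at the end of this function). */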
1983 
1984  /* using division instead of shift to correctly handle negative values */
1985  x = 8 * bx + motion_x / motion_shift;
1986  y = 8 * by + motion_y / motion_shift;
1987 
1988  x_subpel = motion_x & subpel_mask;
1989  y_subpel = motion_y & subpel_mask;
1990 
1991  if (x_subpel || y_subpel) {
1992  x--;
1993  y--;
1994 
1995  if (x_subpel)
1996  x = FFMIN(x, x + FFSIGN(motion_x));
1997 
1998  if (y_subpel)
1999  y = FFMIN(y, y + FFSIGN(motion_y));
2000 
2001  x2 = x + block_width;
2002  y2 = y + block_width;
2003 
2004  if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
2005  return 0;
2006 
2007  x_offset = (-(x + 2) & 7) + 2;
2008  y_offset = (-(y + 2) & 7) + 2;
2009 
2010  if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
2011  return 0;
2012 
2013  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2014  loop_stride, stride,
2015  12, 12, src_x - 1, src_y - 1,
2016  plane_width,
2017  plane_height);
2018 
2019  if (x_offset <= 8 + x_subpel)
2020  ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);
2021 
2022  if (y_offset <= 8 + y_subpel)
2023  ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);
2024 
2025  } else {
2026 
2027  x_offset = -x & 7;
2028  y_offset = -y & 7;
2029 
2030  if (!x_offset && !y_offset)
2031  return 0;
2032 
2033  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2034  loop_stride, stride,
2035  12, 12, src_x - 1, src_y - 1,
2036  plane_width,
2037  plane_height);
2038 
2039 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2040  if ((uintptr_t)(ptr) & 7) \
2041  s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2042  else \
2043  s->vp3dsp.name(ptr, stride, bounding_values);
2044 
2045  if (x_offset)
2046  safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2047 
2048  if (y_offset)
2049  safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
2050  }
2051 
2052  for (int i = 0; i < 9; i++)
2053  memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);
2054 
2055  return 1;
2056 }
2057 #endif
2058 
2059 /*
2060  * Perform the final rendering for a particular slice of data.
2061  * The slice number ranges from 0..(c_superblock_height - 1).
2062  */
2063 static void render_slice(Vp3DecodeContext *s, int slice)
2064 {
2065  int16_t *block = s->block;
2066  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2067  /* When decoding keyframes, the earlier frames may not be available,
2068  * so to avoid using undefined pointer arithmetic on them we just
2069  * use the current frame instead. Nothing is ever read from these
2070  * frames in case of a keyframe. */
2071  const AVFrame *last_frame = s->last_frame.f->data[0] ?
2072  s->last_frame.f : s->current_frame.f;
2073  const AVFrame *golden_frame = s->golden_frame.f->data[0] ?
2074  s->golden_frame.f : s->current_frame.f;
2075  int motion_halfpel_index;
2076  int first_pixel;
2077 
2078  if (slice >= s->c_superblock_height)
2079  return;
2080 
2081  for (int plane = 0; plane < 3; plane++) {
2082  uint8_t *output_plane = s->current_frame.f->data[plane] +
2083  s->data_offset[plane];
2084  const uint8_t *last_plane = last_frame->data[plane] +
2085  s->data_offset[plane];
2086  const uint8_t *golden_plane = golden_frame->data[plane] +
2087  s->data_offset[plane];
2088  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2089  int plane_width = s->width >> (plane && s->chroma_x_shift);
2090  int plane_height = s->height >> (plane && s->chroma_y_shift);
2091  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2092 
2093  int sb_y = slice << (!plane && s->chroma_y_shift);
2094  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2095  int slice_width = plane ? s->c_superblock_width
2096  : s->y_superblock_width;
2097 
2098  int fragment_width = s->fragment_width[!!plane];
2099  int fragment_height = s->fragment_height[!!plane];
2100  int fragment_start = s->fragment_start[plane];
2101 
2102  int do_await = !plane && HAVE_THREADS &&
2103  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2104 
2105  if (!s->flipped_image)
2106  stride = -stride;
2107  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2108  continue;
2109 
2110  /* for each superblock row in the slice (both of them)... */
2111  for (; sb_y < slice_height; sb_y++) {
2112  /* for each superblock in a row... */
2113  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2114  /* for each block in a superblock... */
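 /* The 16 fragments of a 4x4-fragment superblock are visited in
  * Hilbert-curve order (hilbert_offset[]), matching the fragment
  * coding order used by the VP3/Theora bitstream. */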
2115  for (int j = 0; j < 16; j++) {
2116  int x = 4 * sb_x + hilbert_offset[j][0];
2117  int y = 4 * sb_y + hilbert_offset[j][1];
2118  int fragment = y * fragment_width + x;
2119 
2120  int i = fragment_start + fragment;
2121 
2122  // bounds check
2123  if (x >= fragment_width || y >= fragment_height)
2124  continue;
2125 
2126  first_pixel = 8 * y * stride + 8 * x;
2127 
2128  if (do_await &&
2129  s->all_fragments[i].coding_method != MODE_INTRA)
2130  await_reference_row(s, &s->all_fragments[i],
2131  motion_val[fragment][1],
2132  (16 * y) >> s->chroma_y_shift);
2133 
2134  /* transform if this block was coded */
2135  if (s->all_fragments[i].coding_method != MODE_COPY) {
2136  const uint8_t *motion_source;
2137  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2138  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2139  motion_source = golden_plane;
2140  else
2141  motion_source = last_plane;
2142 
2143  motion_source += first_pixel;
2144  motion_halfpel_index = 0;
2145 
2146  /* sort out the motion vector if this fragment is coded
2147  * using a motion vector method */
2148  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2149  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2150  int src_x, src_y;
2151  int standard_mc = 1;
2152  motion_x = motion_val[fragment][0];
2153  motion_y = motion_val[fragment][1];
2154 #if CONFIG_VP4_DECODER
2155  if (plane && s->version >= 2) {
2156  motion_x = (motion_x >> 1) | (motion_x & 1);
2157  motion_y = (motion_y >> 1) | (motion_y & 1);
2158  }
2159 #endif
2160 
2161  src_x = (motion_x >> 1) + 8 * x;
2162  src_y = (motion_y >> 1) + 8 * y;
2163 
2164  motion_halfpel_index = motion_x & 0x01;
2165  motion_source += (motion_x >> 1);
2166 
2167  motion_halfpel_index |= (motion_y & 0x01) << 1;
2168  motion_source += ((motion_y >> 1) * stride);
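 /* motion_halfpel_index: bit 0 = horizontal half-pel, bit 1 = vertical
  * half-pel; it selects the hpel put function below, and index 3 (both
  * half-pel) takes the separate put_no_rnd_pixels_l2 path. The full-pel
  * part of the vector has already been folded into motion_source. */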
2169 
2170 #if CONFIG_VP4_DECODER
2171  if (s->version >= 2) {
2172  uint8_t *temp = s->edge_emu_buffer;
2173  if (stride < 0)
2174  temp -= 8 * stride;
2175  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2176  motion_source = temp;
2177  standard_mc = 0;
2178  }
2179  }
2180 #endif
2181 
2182  if (standard_mc && (
2183  src_x < 0 || src_y < 0 ||
2184  src_x + 9 >= plane_width ||
2185  src_y + 9 >= plane_height)) {
2186  uint8_t *temp = s->edge_emu_buffer;
2187  if (stride < 0)
2188  temp -= 8 * stride;
2189 
2190  s->vdsp.emulated_edge_mc(temp, motion_source,
2191  stride, stride,
2192  9, 9, src_x, src_y,
2193  plane_width,
2194  plane_height);
2195  motion_source = temp;
2196  }
2197  }
2198 
2199  /* first, take care of copying a block from either the
2200  * previous or the golden frame */
2201  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2202  /* Note, it is possible to implement all MC cases
2203  * with put_no_rnd_pixels_l2 which would look more
2204  * like the VP3 source but this would be slower as
2205  * put_no_rnd_pixels_tab is better optimized */
2206  if (motion_halfpel_index != 3) {
2207  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2208  output_plane + first_pixel,
2209  motion_source, stride, 8);
2210  } else {
2211  /* d is 0 if motion_x and _y have the same sign,
2212  * else -1 */
2213  int d = (motion_x ^ motion_y) >> 31;
2214  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2215  motion_source - d,
2216  motion_source + stride + 1 + d,
2217  stride, 8);
2218  }
2219  }
2220 
2221  /* invert DCT and place (or add) in final output */
2222 
2223  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2224  vp3_dequant(s, s->all_fragments + i,
2225  plane, 0, block);
2226  s->vp3dsp.idct_put(output_plane + first_pixel,
2227  stride,
2228  block);
2229  } else {
2230  if (vp3_dequant(s, s->all_fragments + i,
2231  plane, 1, block)) {
2232  s->vp3dsp.idct_add(output_plane + first_pixel,
2233  stride,
2234  block);
2235  } else {
2236  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2237  stride, block);
2238  }
2239  }
2240  } else {
2241  /* copy directly from the previous frame */
2242  s->hdsp.put_pixels_tab[1][0](
2243  output_plane + first_pixel,
2244  last_plane + first_pixel,
2245  stride, 8);
2246  }
2247  }
2248  }
2249 
2250  // Filter up to the last row in the superblock row
2251  if (s->version < 2 && !s->skip_loop_filter)
2252  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2253  FFMIN(4 * sb_y + 3, fragment_height - 1));
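 /* 4 * sb_y - !!sb_y: the bottom fragment row of a superblock row cannot be
  * fully filtered until the row below it has been rendered, so filtering
  * trails rendering by one fragment row (except at the very top). */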
2254  }
2255  }
2256 
2257  /* this looks like a good place for slice dispatch... */
2258  /* algorithm:
2259  * if (slice == s->macroblock_height - 1)
2260  * dispatch (both last slice & 2nd-to-last slice);
2261  * else if (slice > 0)
2262  * dispatch (slice - 1);
2263  */
2264 
2265  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2266  s->height - 16));
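 /* Each slice spans one chroma superblock row, i.e. (32 << chroma_y_shift)
  * luma rows; the bottom 16 luma rows are withheld here because the next
  * slice's loop filtering can still modify them. */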
2267 }
2268 
2269 static av_cold void init_tables_once(void)
2270 {
2272 
2274  SUPERBLOCK_VLC_BITS, 34,
2276  NULL, 0, 0, 1, 0);
2277 
2280  NULL, 0, 0, 0, 0);
2281 
2283  &motion_vector_vlc_table[0][1], 2,
2284  &motion_vector_vlc_table[0][0], 2, 1,
2285  -31, 0);
2286 
2288  mode_code_vlc_len, 1,
2289  NULL, 0, 0, 0, 0);
2290 
2291 #if CONFIG_VP4_DECODER
2292  for (int j = 0; j < 2; j++)
2293  for (int i = 0; i < 7; i++) {
2294  vp4_mv_vlc_table[j][i] =
2296  &vp4_mv_vlc[j][i][0][1], 2,
2297  &vp4_mv_vlc[j][i][0][0], 2, 1,
2298  -31, 0);
2299  }
2300 
2301  /* version >= 2 */
2302  for (int i = 0; i < 2; i++) {
2303  block_pattern_vlc[i] =
2304  ff_vlc_init_tables(&state, 5, 14,
2305  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2306  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0);
2307  }
2308 #endif
2309 }
2310 
2311 /// Allocate tables for per-frame data in Vp3DecodeContext
2312 static av_cold int allocate_tables(AVCodecContext *avctx)
2313 {
2314  Vp3DecodeContext *s = avctx->priv_data;
2315  int y_fragment_count, c_fragment_count;
2316 
2317  free_tables(avctx);
2318 
2319  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2320  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2321 
2322  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2323  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2324  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2325 
2326  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2327  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2328  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2329 
2330  s->dct_tokens_base = av_calloc(s->fragment_count,
2331  64 * sizeof(*s->dct_tokens_base));
2332  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2333  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2334 
2335  /* work out the block mapping tables */
2336  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2337  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2338 
2339  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2340 
2341  if (!s->superblock_coding || !s->all_fragments ||
2342  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2343  !s->nkf_coded_fragment_list ||
2344  !s->superblock_fragments || !s->macroblock_coding ||
2345  !s->dc_pred_row ||
2346  !s->motion_val[0] || !s->motion_val[1]) {
2347  return -1;
2348  }
2349 
2350  init_block_mapping(s);
2351 
2352  return 0;
2353 }
2354 
2355 static av_cold int init_frames(Vp3DecodeContext *s)
2356 {
2357  s->current_frame.f = av_frame_alloc();
2358  s->last_frame.f = av_frame_alloc();
2359  s->golden_frame.f = av_frame_alloc();
2360 
2361  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2362  return AVERROR(ENOMEM);
2363 
2364  return 0;
2365 }
2366 
2367 static av_cold void free_vlc_tables(FFRefStructOpaque unused, void *obj)
2368 {
2369  CoeffVLCs *vlcs = obj;
2370 
2371  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++)
2372  ff_vlc_free(&vlcs->vlcs[i]);
2373 }
2374 
2376 {
2377  static AVOnce init_static_once = AV_ONCE_INIT;
2378  Vp3DecodeContext *s = avctx->priv_data;
2379  int ret;
2380  int c_width;
2381  int c_height;
2382  int y_fragment_count, c_fragment_count;
2383 
2384  ret = init_frames(s);
2385  if (ret < 0)
2386  return ret;
2387 
2388  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2389  s->version = 3;
2390 #if !CONFIG_VP4_DECODER
2391  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2392  return AVERROR_DECODER_NOT_FOUND;
2393 #endif
2394  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2395  s->version = 0;
2396  else
2397  s->version = 1;
2398 
2399  s->avctx = avctx;
2400  s->width = FFALIGN(avctx->coded_width, 16);
2401  s->height = FFALIGN(avctx->coded_height, 16);
2402  if (s->width < 18)
2403  return AVERROR_PATCHWELCOME;
2404  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2405  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2407  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2408  ff_videodsp_init(&s->vdsp, 8);
2409  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2410 
2411  for (int i = 0; i < 64; i++) {
2412 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2413  s->idct_permutation[i] = TRANSPOSE(i);
2414  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2415 #undef TRANSPOSE
2416  }
2417 
2418  /* initialize to an impossible value which will force a recalculation
2419  * in the first frame decode */
2420  for (int i = 0; i < 3; i++)
2421  s->qps[i] = -1;
2422 
2423  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2424  if (ret)
2425  return ret;
2426 
2427  s->y_superblock_width = (s->width + 31) / 32;
2428  s->y_superblock_height = (s->height + 31) / 32;
2429  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2430 
2431  /* work out the dimensions for the C planes */
2432  c_width = s->width >> s->chroma_x_shift;
2433  c_height = s->height >> s->chroma_y_shift;
2434  s->c_superblock_width = (c_width + 31) / 32;
2435  s->c_superblock_height = (c_height + 31) / 32;
2436  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2437 
2438  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2439  s->u_superblock_start = s->y_superblock_count;
2440  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2441 
2442  s->macroblock_width = (s->width + 15) / 16;
2443  s->macroblock_height = (s->height + 15) / 16;
2444  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2445  s->c_macroblock_width = (c_width + 15) / 16;
2446  s->c_macroblock_height = (c_height + 15) / 16;
2447  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2448  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2449 
2450  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2451  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2452  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2453  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2454 
2455  /* fragment count covers all 8x8 blocks for all 3 planes */
2456  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2457  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2458  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2459  s->fragment_start[1] = y_fragment_count;
2460  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2461 
2462  if (!s->theora_tables) {
2463  for (int i = 0; i < 64; i++) {
2464  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2465  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2466  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2467  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2468  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2469  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2470  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2471  }
2472 
2473  for (int inter = 0; inter < 2; inter++) {
2474  for (int plane = 0; plane < 3; plane++) {
2475  s->qr_count[inter][plane] = 1;
2476  s->qr_size[inter][plane][0] = 63;
2477  s->qr_base[inter][plane][0] =
2478  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2479  }
2480  }
2481  }
2482 
2483  if (!avctx->internal->is_copy) {
2484  CoeffVLCs *vlcs = ff_refstruct_alloc_ext(sizeof(*s->coeff_vlc), 0,
2485  NULL, free_vlc_tables);
2486  if (!vlcs)
2487  return AVERROR(ENOMEM);
2488 
2489  s->coeff_vlc = vlcs;
2490 
2491  if (!s->theora_tables) {
2492  const uint8_t (*bias_tabs)[32][2];
2493 
2494  /* init VLC tables */
2495  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2496  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2497  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, 32,
2498  &bias_tabs[i][0][1], 2,
2499  &bias_tabs[i][0][0], 2, 1,
2500  0, 0, avctx);
2501  if (ret < 0)
2502  return ret;
2503  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2504  }
2505  } else {
2506  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2507  const HuffTable *tab = &s->huffman_table[i];
2508 
2509  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, tab->nb_entries,
2510  &tab->entries[0].len, sizeof(*tab->entries),
2511  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2512  0, 0, avctx);
2513  if (ret < 0)
2514  return ret;
2515  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2516  }
2517  }
2518  }
2519 
2520  ff_thread_once(&init_static_once, init_tables_once);
2521 
2522  return allocate_tables(avctx);
2523 }
2524 
2525 /// Release and shuffle frames after decode finishes
2526 static int update_frames(AVCodecContext *avctx)
2527 {
2528  Vp3DecodeContext *s = avctx->priv_data;
2529  int ret = 0;
2530 
2531  if (s->keyframe) {
2532  ff_thread_release_ext_buffer(&s->golden_frame);
2533  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
2534  }
2535  /* shuffle frames */
2536  ff_thread_release_ext_buffer(&s->last_frame);
2537  FFSWAP(ThreadFrame, s->last_frame, s->current_frame);
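 /* Frame rotation: the just-decoded frame becomes the new last frame, and it
  * also replaces the golden frame when this was a keyframe; the previous
  * last frame's buffer is released above. */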
2538 
2539  return ret;
2540 }
2541 
2542 #if HAVE_THREADS
2543 static int ref_frame(ThreadFrame *dst, const ThreadFrame *src)
2544 {
2545  ff_thread_release_ext_buffer(dst);
2546  if (src->f->data[0])
2547  return ff_thread_ref_frame(dst, src);
2548  return 0;
2549 }
2550 
2551 static int ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
2552 {
2553  int ret;
2554  if ((ret = ref_frame(&dst->current_frame, &src->current_frame)) < 0 ||
2555  (ret = ref_frame(&dst->golden_frame, &src->golden_frame)) < 0 ||
2556  (ret = ref_frame(&dst->last_frame, &src->last_frame)) < 0)
2557  return ret;
2558  return 0;
2559 }
2560 
2561 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2562 {
2563  Vp3DecodeContext *s = dst->priv_data;
2564  const Vp3DecodeContext *s1 = src->priv_data;
2565  int qps_changed = 0, err;
2566 
2567  ff_refstruct_replace(&s->coeff_vlc, s1->coeff_vlc);
2568 
2569  if (!s1->current_frame.f->data[0] ||
2570  s->width != s1->width || s->height != s1->height) {
2571  if (s != s1)
2572  ref_frames(s, s1);
2573  return -1;
2574  }
2575 
2576  if (s != s1) {
2577  // copy previous frame data
2578  if ((err = ref_frames(s, s1)) < 0)
2579  return err;
2580 
2581  s->keyframe = s1->keyframe;
2582 
2583  // copy qscale data if necessary
2584  for (int i = 0; i < 3; i++) {
2585  if (s->qps[i] != s1->qps[i]) {
2586  qps_changed = 1;
2587  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2588  }
2589  }
2590 
2591  if (s->qps[0] != s1->qps[0])
2592  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2593  sizeof(s->bounding_values_array));
2594 
2595  if (qps_changed) {
2596  memcpy(s->qps, s1->qps, sizeof(s->qps));
2597  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2598  s->nqps = s1->nqps;
2599  }
2600  }
2601 
2602  return update_frames(dst);
2603 }
2604 #endif
2605 
2606 static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2607  int *got_frame, AVPacket *avpkt)
2608 {
2609  const uint8_t *buf = avpkt->data;
2610  int buf_size = avpkt->size;
2611  Vp3DecodeContext *s = avctx->priv_data;
2612  GetBitContext gb;
2613  int ret;
2614 
2615  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2616  return ret;
2617 
2618 #if CONFIG_THEORA_DECODER
2619  if (s->theora && get_bits1(&gb)) {
2620  int type = get_bits(&gb, 7);
2621  skip_bits_long(&gb, 6*8); /* "theora" */
2622 
2623  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2624  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2625  return AVERROR_PATCHWELCOME;
2626  }
2627  if (type == 0) {
2628  vp3_decode_end(avctx);
2629  ret = theora_decode_header(avctx, &gb);
2630 
2631  if (ret >= 0)
2632  ret = vp3_decode_init(avctx);
2633  if (ret < 0) {
2634  vp3_decode_end(avctx);
2635  return ret;
2636  }
2637  return buf_size;
2638  } else if (type == 2) {
2639  vp3_decode_end(avctx);
2640  ret = theora_decode_tables(avctx, &gb);
2641  if (ret >= 0)
2642  ret = vp3_decode_init(avctx);
2643  if (ret < 0) {
2644  vp3_decode_end(avctx);
2645  return ret;
2646  }
2647  return buf_size;
2648  }
2649 
2650  av_log(avctx, AV_LOG_ERROR,
2651  "Header packet passed to frame decoder, skipping\n");
2652  return -1;
2653  }
2654 #endif
2655 
2656  s->keyframe = !get_bits1(&gb);
2657  if (!s->all_fragments) {
2658  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2659  return -1;
2660  }
2661  if (!s->theora)
2662  skip_bits(&gb, 1);
2663  for (int i = 0; i < 3; i++)
2664  s->last_qps[i] = s->qps[i];
2665 
2666  s->nqps = 0;
2667  do {
2668  s->qps[s->nqps++] = get_bits(&gb, 6);
2669  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2670  for (int i = s->nqps; i < 3; i++)
2671  s->qps[i] = -1;
2672 
2673  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2674  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2675  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2676 
2677  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2678  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2679  : AVDISCARD_NONKEY);
2680 
2681  if (s->qps[0] != s->last_qps[0])
2682  init_loop_filter(s);
2683 
2684  for (int i = 0; i < s->nqps; i++)
2685  // reinit all dequantizers if the first one changed, because
2686  // the DC of the first quantizer must be used for all matrices
2687  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2688  init_dequantizer(s, i);
2689 
2690  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2691  return buf_size;
2692 
2693  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2694  : AV_PICTURE_TYPE_P;
2695  if (s->keyframe)
2696  s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
2697  else
2698  s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;
2699  if ((ret = ff_thread_get_ext_buffer(avctx, &s->current_frame,
2700  AV_GET_BUFFER_FLAG_REF)) < 0)
2701  goto error;
2702 
2703  if (!s->edge_emu_buffer) {
2704  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2705  if (!s->edge_emu_buffer) {
2706  ret = AVERROR(ENOMEM);
2707  goto error;
2708  }
2709  }
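 /* 9 lines of scratch are enough: motion compensation of an 8x8 fragment
  * with half-pel interpolation reads at most a 9x9 area from this
  * emulated-edge buffer. */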
2710 
2711  if (s->keyframe) {
2712  if (!s->theora) {
2713  skip_bits(&gb, 4); /* width code */
2714  skip_bits(&gb, 4); /* height code */
2715  if (s->version) {
2716  int version = get_bits(&gb, 5);
2717 #if !CONFIG_VP4_DECODER
2718  if (version >= 2) {
2719  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2720  return AVERROR_DECODER_NOT_FOUND;
2721  }
2722 #endif
2723  s->version = version;
2724  if (avctx->frame_num == 0)
2725  av_log(s->avctx, AV_LOG_DEBUG,
2726  "VP version: %d\n", s->version);
2727  }
2728  }
2729  if (s->version || s->theora) {
2730  if (get_bits1(&gb))
2731  av_log(s->avctx, AV_LOG_ERROR,
2732  "Warning, unsupported keyframe coding type?!\n");
2733  skip_bits(&gb, 2); /* reserved? */
2734 
2735 #if CONFIG_VP4_DECODER
2736  if (s->version >= 2) {
2737  int mb_height, mb_width;
2738  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2739 
2740  mb_height = get_bits(&gb, 8);
2741  mb_width = get_bits(&gb, 8);
2742  if (mb_height != s->macroblock_height ||
2743  mb_width != s->macroblock_width)
2744  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2745 
2746  mb_width_mul = get_bits(&gb, 5);
2747  mb_width_div = get_bits(&gb, 3);
2748  mb_height_mul = get_bits(&gb, 5);
2749  mb_height_div = get_bits(&gb, 3);
2750  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2751  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multiplier/divider");
2752 
2753  if (get_bits(&gb, 2))
2754  avpriv_request_sample(s->avctx, "unknown bits");
2755  }
2756 #endif
2757  }
2758  } else {
2759  if (!s->golden_frame.f->data[0]) {
2760  av_log(s->avctx, AV_LOG_WARNING,
2761  "vp3: first frame not a keyframe\n");
2762 
2763  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2764  if ((ret = ff_thread_get_ext_buffer(avctx, &s->golden_frame,
2765  AV_GET_BUFFER_FLAG_REF)) < 0)
2766  goto error;
2767  ff_thread_release_ext_buffer(&s->last_frame);
2768  if ((ret = ff_thread_ref_frame(&s->last_frame,
2769  &s->golden_frame)) < 0)
2770  goto error;
2771  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2772  }
2773  }
2774  ff_thread_finish_setup(avctx);
2775 
2776  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2777 
2778  if (s->version < 2) {
2779  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2780  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2781  goto error;
2782  }
2783 #if CONFIG_VP4_DECODER
2784  } else {
2785  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2786  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2787  goto error;
2788  }
2789 #endif
2790  }
2791  if ((ret = unpack_modes(s, &gb)) < 0) {
2792  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2793  goto error;
2794  }
2795  if (ret = unpack_vectors(s, &gb)) {
2796  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2797  goto error;
2798  }
2799  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2800  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2801  goto error;
2802  }
2803 
2804  if (s->version < 2) {
2805  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2806  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2807  goto error;
2808  }
2809 #if CONFIG_VP4_DECODER
2810  } else {
2811  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2812  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2813  goto error;
2814  }
2815 #endif
2816  }
2817 
2818  for (int i = 0; i < 3; i++) {
2819  int height = s->height >> (i && s->chroma_y_shift);
2820  if (s->flipped_image)
2821  s->data_offset[i] = 0;
2822  else
2823  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2824  }
2825 
2826  s->last_slice_end = 0;
2827  for (int i = 0; i < s->c_superblock_height; i++)
2828  render_slice(s, i);
2829 
2830  // filter the last row
2831  if (s->version < 2)
2832  for (int i = 0; i < 3; i++) {
2833  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2834  apply_loop_filter(s, i, row, row + 1);
2835  }
2836  vp3_draw_horiz_band(s, s->height);
2837 
2838  /* output frame, offset as needed */
2839  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2840  return ret;
2841 
2842  frame->crop_left = s->offset_x;
2843  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2844  frame->crop_top = s->offset_y;
2845  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2846 
2847  *got_frame = 1;
2848 
2849  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
2850  ret = update_frames(avctx);
2851  if (ret < 0)
2852  return ret;
2853  }
2854 
2855  return buf_size;
2856 
2857 error:
2858  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2859 
2860  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2861  av_frame_unref(s->current_frame.f);
2862 
2863  return ret;
2864 }
2865 
2866 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2867  AVCodecContext *avctx)
2868 {
2869  if (get_bits1(gb)) {
2870  int token;
2871  if (huff->nb_entries >= 32) { /* overflow */
2872  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2873  return -1;
2874  }
2875  token = get_bits(gb, 5);
2876  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2877  length, huff->nb_entries, token);
2878  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2879  } else {
2880  /* The following bound follows from the fact that nb_entries <= 32. */
2881  if (length >= 31) { /* overflow */
2882  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2883  return -1;
2884  }
2885  length++;
2886  if (read_huffman_tree(huff, gb, length, avctx))
2887  return -1;
2888  if (read_huffman_tree(huff, gb, length, avctx))
2889  return -1;
2890  }
2891  return 0;
2892 }
2893 
2894 #if CONFIG_THEORA_DECODER
2895 static const enum AVPixelFormat theora_pix_fmts[4] = {
2896  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2897 };
2898 
2899 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2900 {
2901  Vp3DecodeContext *s = avctx->priv_data;
2902  int visible_width, visible_height, colorspace;
2903  uint8_t offset_x = 0, offset_y = 0;
2904  int ret;
2905  AVRational fps, aspect;
2906 
2907  if (get_bits_left(gb) < 206)
2908  return AVERROR_INVALIDDATA;
2909 
2910  s->theora_header = 0;
2911  s->theora = get_bits(gb, 24);
2912  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2913  if (!s->theora) {
2914  s->theora = 1;
2915  avpriv_request_sample(s->avctx, "theora 0");
2916  }
2917 
2918  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2919  * but previous versions have the image flipped relative to vp3 */
2920  if (s->theora < 0x030200) {
2921  s->flipped_image = 1;
2922  av_log(avctx, AV_LOG_DEBUG,
2923  "Old (<alpha3) Theora bitstream, flipped image\n");
2924  }
2925 
2926  visible_width =
2927  s->width = get_bits(gb, 16) << 4;
2928  visible_height =
2929  s->height = get_bits(gb, 16) << 4;
2930 
2931  if (s->theora >= 0x030200) {
2932  visible_width = get_bits(gb, 24);
2933  visible_height = get_bits(gb, 24);
2934 
2935  offset_x = get_bits(gb, 8); /* offset x */
2936  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2937  }
2938 
2939  /* sanity check */
2940  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2941  visible_width + offset_x > s->width ||
2942  visible_height + offset_y > s->height ||
2943  visible_width < 18
2944  ) {
2945  av_log(avctx, AV_LOG_ERROR,
2946  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2947  visible_width, visible_height, offset_x, offset_y,
2948  s->width, s->height);
2949  return AVERROR_INVALIDDATA;
2950  }
2951 
2952  fps.num = get_bits_long(gb, 32);
2953  fps.den = get_bits_long(gb, 32);
2954  if (fps.num && fps.den) {
2955  if (fps.num < 0 || fps.den < 0) {
2956  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2957  return AVERROR_INVALIDDATA;
2958  }
2959  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2960  fps.den, fps.num, 1 << 30);
2961  }
2962 
2963  aspect.num = get_bits(gb, 24);
2964  aspect.den = get_bits(gb, 24);
2965  if (aspect.num && aspect.den) {
2966  av_reduce(&avctx->sample_aspect_ratio.num,
2967  &avctx->sample_aspect_ratio.den,
2968  aspect.num, aspect.den, 1 << 30);
2969  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2970  }
2971 
2972  if (s->theora < 0x030200)
2973  skip_bits(gb, 5); /* keyframe frequency force */
2974  colorspace = get_bits(gb, 8);
2975  skip_bits(gb, 24); /* bitrate */
2976 
2977  skip_bits(gb, 6); /* quality hint */
2978 
2979  if (s->theora >= 0x030200) {
2980  skip_bits(gb, 5); /* keyframe frequency force */
2981  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2982  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2983  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2984  return AVERROR_INVALIDDATA;
2985  }
2986  skip_bits(gb, 3); /* reserved */
2987  } else
2988  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2989 
2990  if (s->width < 18)
2991  return AVERROR_PATCHWELCOME;
2992  ret = ff_set_dimensions(avctx, s->width, s->height);
2993  if (ret < 0)
2994  return ret;
2995  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2996  avctx->width = visible_width;
2997  avctx->height = visible_height;
2998  // translate offsets from theora axis ([0,0] lower left)
2999  // to normal axis ([0,0] upper left)
3000  s->offset_x = offset_x;
3001  s->offset_y = s->height - visible_height - offset_y;
3002  }
3003 
3004  if (colorspace == 1)
3005  avctx->color_primaries = AVCOL_PRI_SMPTE170M;
3006  else if (colorspace == 2)
3007  avctx->color_primaries = AVCOL_PRI_BT470BG;
3008 
3009  if (colorspace == 1 || colorspace == 2) {
3010  avctx->colorspace = AVCOL_SPC_BT470BG;
3011  avctx->color_trc = AVCOL_TRC_BT709;
3012  }
3013 
3014  s->theora_header = 1;
3015  return 0;
3016 }
3017 
3018 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
3019 {
3020  Vp3DecodeContext *s = avctx->priv_data;
3021  int n, matrices, ret;
3022 
3023  if (!s->theora_header)
3024  return AVERROR_INVALIDDATA;
3025 
3026  if (s->theora >= 0x030200) {
3027  n = get_bits(gb, 3);
3028  /* loop filter limit values table */
3029  if (n)
3030  for (int i = 0; i < 64; i++)
3031  s->filter_limit_values[i] = get_bits(gb, n);
3032  }
3033 
3034  if (s->theora >= 0x030200)
3035  n = get_bits(gb, 4) + 1;
3036  else
3037  n = 16;
3038  /* quality threshold table */
3039  for (int i = 0; i < 64; i++)
3040  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3041 
3042  if (s->theora >= 0x030200)
3043  n = get_bits(gb, 4) + 1;
3044  else
3045  n = 16;
3046  /* dc scale factor table */
3047  for (int i = 0; i < 64; i++)
3048  s->coded_dc_scale_factor[0][i] =
3049  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3050 
3051  if (s->theora >= 0x030200)
3052  matrices = get_bits(gb, 9) + 1;
3053  else
3054  matrices = 3;
3055 
3056  if (matrices > 384) {
3057  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
3058  return -1;
3059  }
3060 
3061  for (int j = 0; j < matrices; j++)
3062  for (int i = 0; i < 64; i++)
3063  s->base_matrix[j][i] = get_bits(gb, 8);
3064 
3065  for (int inter = 0; inter <= 1; inter++) {
3066  for (int plane = 0; plane <= 2; plane++) {
3067  int newqr = 1;
3068  if (inter || plane > 0)
3069  newqr = get_bits1(gb);
3070  if (!newqr) {
3071  int qtj, plj;
3072  if (inter && get_bits1(gb)) {
3073  qtj = 0;
3074  plj = plane;
3075  } else {
3076  qtj = (3 * inter + plane - 1) / 3;
3077  plj = (plane + 2) % 3;
3078  }
3079  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3080  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3081  sizeof(s->qr_size[0][0]));
3082  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3083  sizeof(s->qr_base[0][0]));
3084  } else {
3085  int qri = 0;
3086  int qi = 0;
3087 
3088  for (;;) {
3089  int i = get_bits(gb, av_log2(matrices - 1) + 1);
3090  if (i >= matrices) {
3091  av_log(avctx, AV_LOG_ERROR,
3092  "invalid base matrix index\n");
3093  return -1;
3094  }
3095  s->qr_base[inter][plane][qri] = i;
3096  if (qi >= 63)
3097  break;
3098  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3099  s->qr_size[inter][plane][qri++] = i;
3100  qi += i;
3101  }
3102 
3103  if (qi > 63) {
3104  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3105  return -1;
3106  }
3107  s->qr_count[inter][plane] = qri;
3108  }
3109  }
3110  }
3111 
3112  /* Huffman tables */
3113  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3114  s->huffman_table[i].nb_entries = 0;
3115  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3116  return ret;
3117  }
3118 
3119  s->theora_tables = 1;
3120 
3121  return 0;
3122 }
3123 
3124 static av_cold int theora_decode_init(AVCodecContext *avctx)
3125 {
3126  Vp3DecodeContext *s = avctx->priv_data;
3127  GetBitContext gb;
3128  int ptype;
3129  const uint8_t *header_start[3];
3130  int header_len[3];
3131  int ret;
3132 
3133  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3134 
3135  s->theora = 1;
3136 
3137  if (!avctx->extradata_size) {
3138  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3139  return -1;
3140  }
3141 
3142  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
3143  42, header_start, header_len) < 0) {
3144  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3145  return -1;
3146  }
3147 
3148  for (int i = 0; i < 3; i++) {
3149  if (header_len[i] <= 0)
3150  continue;
3151  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3152  if (ret < 0)
3153  return ret;
3154 
3155  ptype = get_bits(&gb, 8);
3156 
3157  if (!(ptype & 0x80)) {
3158  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3159 // return -1;
3160  }
3161 
3162  // FIXME: Check for this as well.
3163  skip_bits_long(&gb, 6 * 8); /* "theora" */
3164 
3165  switch (ptype) {
3166  case 0x80:
3167  if (theora_decode_header(avctx, &gb) < 0)
3168  return -1;
3169  break;
3170  case 0x81:
3171 // FIXME: is this needed? it breaks sometimes
3172 // theora_decode_comments(avctx, gb);
3173  break;
3174  case 0x82:
3175  if (theora_decode_tables(avctx, &gb))
3176  return -1;
3177  break;
3178  default:
3179  av_log(avctx, AV_LOG_ERROR,
3180  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3181  break;
3182  }
3183  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3184  av_log(avctx, AV_LOG_WARNING,
3185  "%d bits left in packet %X\n",
3186  get_bits_left(&gb), ptype);
3187  if (s->theora < 0x030200)
3188  break;
3189  }
3190 
3191  return vp3_decode_init(avctx);
3192 }
3193 
3194 const FFCodec ff_theora_decoder = {
3195  .p.name = "theora",
3196  CODEC_LONG_NAME("Theora"),
3197  .p.type = AVMEDIA_TYPE_VIDEO,
3198  .p.id = AV_CODEC_ID_THEORA,
3199  .priv_data_size = sizeof(Vp3DecodeContext),
3200  .init = theora_decode_init,
3201  .close = vp3_decode_end,
3202  FF_CODEC_DECODE_CB(vp3_decode_frame),
3203  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3204  AV_CODEC_CAP_FRAME_THREADS,
3205  .flush = vp3_decode_flush,
3206  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3207  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3208  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3209 };
3210 #endif
3211 
3212 const FFCodec ff_vp3_decoder = {
3213  .p.name = "vp3",
3214  CODEC_LONG_NAME("On2 VP3"),
3215  .p.type = AVMEDIA_TYPE_VIDEO,
3216  .p.id = AV_CODEC_ID_VP3,
3217  .priv_data_size = sizeof(Vp3DecodeContext),
3218  .init = vp3_decode_init,
3219  .close = vp3_decode_end,
3220  FF_CODEC_DECODE_CB(vp3_decode_frame),
3221  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3222  AV_CODEC_CAP_FRAME_THREADS,
3223  .flush = vp3_decode_flush,
3224  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3225  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3226  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3227 };
3228 
3229 #if CONFIG_VP4_DECODER
3230 const FFCodec ff_vp4_decoder = {
3231  .p.name = "vp4",
3232  CODEC_LONG_NAME("On2 VP4"),
3233  .p.type = AVMEDIA_TYPE_VIDEO,
3234  .p.id = AV_CODEC_ID_VP4,
3235  .priv_data_size = sizeof(Vp3DecodeContext),
3236  .init = vp3_decode_init,
3237  .close = vp3_decode_end,
3238  FF_CODEC_DECODE_CB(vp3_decode_frame),
3239  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3240  AV_CODEC_CAP_FRAME_THREADS,
3241  .flush = vp3_decode_flush,
3242  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3243  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3244  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3245 };
3246 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2312
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1854
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:68
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:98
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:247
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
VP3DSPContext
Definition: vp3dsp.h:25
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp4_get_mv
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:893
vp3_decode_flush
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:351
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:237
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:86
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
VP4Predictor
Definition: vp3.c:176
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:208
thread.h
HuffEntry::len
uint8_t len
Definition: exr.c:95
VP4Predictor::dc
int dc
Definition: vp3.c:177
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
ff_refstruct_alloc_ext
static void * ff_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(FFRefStructOpaque opaque, void *obj))
A wrapper around ff_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
superblock_run_length_vlc
static VLCElem superblock_run_length_vlc[88]
Definition: vp3.c:164
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2866
PUR
#define PUR
vp3dsp.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:678
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:522
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3212
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:244
mode_code_vlc
static VLCElem mode_code_vlc[24+2108 *CONFIG_VP4_DECODER]
Definition: vp3.c:169
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:149
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:325
FFCodec
Definition: codec_internal.h:127
fragment_run_length_vlc
static VLCElem fragment_run_length_vlc[56]
Definition: vp3.c:165
motion_vector_vlc
static VLCElem motion_vector_vlc[112]
Definition: vp3.c:166
base
uint8_t base
Definition: vp3data.h:128
FFRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:69
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:474
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2063
CoeffVLCs::vlc_tabs
const VLCElem * vlc_tabs[80]
Definition: vp3.c:193
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
Vp3DecodeContext::height
int height
Definition: vp3.c:201
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:114
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:375
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
fragment
Definition: dashdec.c:36
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:224
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+2]
Definition: vp3.c:326
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:560
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:314
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1156
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:257
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:209
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:648
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:222
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:90
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
CoeffVLCs
Definition: vp3.c:192
GetBitContext
Definition: get_bits.h:108
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2990
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:85
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:223
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:248
Vp3DecodeContext::theora
int theora
Definition: vp3.c:199
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:338
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
refstruct.h
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:297
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:76
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:281
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:153
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1941
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:671
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
free_vlc_tables
static av_cold void free_vlc_tables(FFRefStructOpaque unused, void *obj)
Definition: vp3.c:2367
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:591
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:138
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
VLCInitState
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
Definition: vlc.h:209
emms_c
#define emms_c()
Definition: emms.h:63
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:242
CoeffVLCs::vlcs
VLC vlcs[80]
Definition: vp3.c:194
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:150
state
static struct @382 state
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:287
s
#define s(width, name)
Definition: cbs_vp9.c:198
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:465
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
s1
#define s1
Definition: regdef.h:38
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:189
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:390
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:74
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:73
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:70
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1639
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1304
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:100
AVFrame::crop_right
size_t crop_right
Definition: frame.h:720
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:54
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:562
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:233
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:850
FFABS
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
Definition: common.h:72
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:207
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:423
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:89
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:225
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:249
NULL
#define NULL
Definition: coverity.c:32
init_frames
static av_cold int init_frames(Vp3DecodeContext *s)
Definition: vp3.c:2355
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
HuffTable
Used to store optimal huffman encoding results.
Definition: mjpegenc_huffman.h:69
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:790
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:480
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:221
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:147
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:996
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
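A minimal sketch of applying a parsed aspect ratio with this internal helper; num and den are placeholders for values read from the sequence header.

AVRational sar = { num, den };     /* pixel aspect ratio from the header */
int ret = ff_set_sar(avctx, sar);  /* validated before being stored on avctx */
if (ret < 0)
    return ret;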
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:199
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:282
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:215
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
update_frames
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2526
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:219
Vp3DecodeContext::coeff_vlc
CoeffVLCs * coeff_vlc
The first 16 of the following VLCs are for the dc coefficients; the others are four groups of 16 VLCs each for the ac coefficients.
Definition: vp3.c:304
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:296
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
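A hedged sketch of a typical VLC read with this internal API; run_length_vlc is a placeholder for a VLCElem table built elsewhere (e.g. with ff_vlc_init_tables_from_lengths()).

/* Up to max_depth = 2 lookups, each consuming at most SUPERBLOCK_VLC_BITS
 * bits, so no more than 12 bits are read per symbol here. */
int sym = get_vlc2(&gb, run_length_vlc, SUPERBLOCK_VLC_BITS, 2);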
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:258
AVOnce
#define AVOnce
Definition: thread.h:202
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1637
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:718
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:211
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:213
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:146
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:241
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:289
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:75
AVFrame::crop_left
size_t crop_left
Definition: frame.h:719
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:523
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Definition: snow.txt:400
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:148
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
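A minimal sketch using the public AVFrame reference API; keep_ref() is a hypothetical helper, not part of vp3.c.

#include <libavutil/frame.h>

/* Take an extra refcounted reference to src without copying pixel data. */
static int keep_ref(AVFrame *dst, const AVFrame *src)
{
    av_frame_unref(dst);             /* drop whatever dst held before */
    return av_frame_ref(dst, src);   /* 0 on success, negative AVERROR */
}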
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:908
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_vp4_decoder
const FFCodec ff_vp4_decoder
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
VLCElem
Definition: vlc.h:32
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:341
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:278
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:254
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:217
Vp3DecodeContext::current_frame
ThreadFrame current_frame
Definition: vp3.c:205
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:212
ref_frame
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
Definition: vvcdec.c:552
height
#define height
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:202
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:246
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:318
version
version
Definition: libkvazaar.c:321
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:582
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:198
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:296
eob_run_table
static const struct @198 eob_run_table[7]
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:206
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:83
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1788
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:234
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:448
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:238
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:320
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:235
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2606
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:232
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:62
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:254
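A minimal sketch of the usual allocation idiom; alloc_table() is hypothetical, not code from vp3.c.

#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Allocate a zeroed, suitably aligned table; pair with av_freep(), which
 * frees and NULLs the pointer. */
static int alloc_table(int **table, size_t count)
{
    *table = av_mallocz(count * sizeof(**table));
    if (!*table)
        return AVERROR(ENOMEM);
    return 0;
}

av_calloc() (listed below) is the variant that additionally checks the nmemb * size multiplication for overflow.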
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:702
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:229
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:226
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:968
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:61
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:293
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:227
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:560
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:577
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:199
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:335
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLCElem *vlc_table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1184
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:84
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:288
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context, call this when the context is ready for the next thread to start decoding the next frame; after the call, do not change any state read by update_thread_context.
Vp3DecodeContext::golden_frame
ThreadFrame golden_frame
Definition: vp3.c:203
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:202
BLOCK_X
#define BLOCK_X
Definition: vp3.c:647
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:93
Vp3DecodeContext
Definition: vp3.c:197
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:88
coeff_vlc
static const VLCElem * coeff_vlc[2][8][4]
Definition: atrac9dec.c:109
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:64
AVCodecContext
main external API structure.
Definition: avcodec.h:445
ThreadFrame
Definition: threadframe.h:27
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1899
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:705
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VLC
Definition: vlc.h:36
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:255
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1030
HuffEntry
Definition: exr.c:94
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:263
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
VLC::table
VLCElem * table
Definition: vlc.h:38
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
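A hedged sketch of splitting Xiph-style extradata into its three headers; the 42-byte first-header size is an assumption for the Theora case, and the snippet relies on the internal libavcodec xiph.h header.

#include "xiph.h"

const uint8_t *header_start[3];
int header_len[3];
/* Split extradata into info, comment and setup headers; returns a
 * negative AVERROR on malformed extradata. */
int ret = avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                                    42, header_start, header_len);
if (ret < 0)
    return ret;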
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:82
VideoDSPContext
Definition: videodsp.h:40
ff_vlc_init_tables_from_lengths
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
Definition: vlc.c:366
HuffEntry::sym
uint8_t sym
Definition: vp3.c:184
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:230
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1635
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:633
Vp3DecodeContext::last_frame
ThreadFrame last_frame
Definition: vp3.c:204
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:245
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vlc_init_tables
static const VLCElem * ff_vlc_init_tables(VLCInitState *state, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, int flags)
Definition: vlc.h:243
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:342
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1148
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:188
VLC_INIT_STATIC_TABLE_FROM_LENGTHS
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
Definition: vlc.h:277
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:323
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
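A minimal sketch of the usual alignment idiom; the 16-pixel (macroblock-sized) alignment is illustrative, and FFALIGN() requires a power-of-two alignment.

#include <libavutil/macros.h>

/* Round a coded dimension up to a whole number of 16-pixel macroblocks. */
static int align_to_macroblock(int v)
{
    return FFALIGN(v, 16);
}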
VLC_INIT_STATE
#define VLC_INIT_STATE(_table)
Definition: vlc.h:214
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
VP4Predictor::type
int type
Definition: vp3.c:178
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2375
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:256
AVFrame::crop_top
size_t crop_top
Definition: frame.h:717
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:60
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:240
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
d
d
Definition: ffmpeg_filter.c:425
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:201
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:295
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1105
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:259
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:363
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
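A minimal sketch of comparing a container fourcc against a tag built with MKTAG(); the "VP31" tag value is purely illustrative.

#include <libavutil/macros.h>

/* codec_tag stores "ABCD" LSB-first, exactly as MKTAG() packs it. */
static int is_vp31(unsigned int codec_tag)
{
    return codec_tag == MKTAG('V', 'P', '3', '1');
}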
h
h
Definition: vp9dsp_template.c:2038
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:80
init_tables_once
static av_cold void init_tables_once(void)
Definition: vp3.c:2269
Vp3DecodeContext::version
int version
Definition: vp3.c:200
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
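A minimal sketch of the usual dimension sanity check; the NULL log context and 0 log offset are illustrative.

#include <stddef.h>
#include <libavutil/imgutils.h>

/* Reject dimensions whose image could not be addressed safely. */
static int check_dims(unsigned w, unsigned h)
{
    return av_image_check_size(w, h, 0, NULL);
}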
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:251
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:214
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:328
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
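A minimal sketch: av_log2(v) returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0, so the bit width of a value follows directly; bits_needed() is a hypothetical helper.

#include <libavutil/common.h>

/* Number of bits needed to represent v (v > 0); e.g. 1000 needs 10 bits. */
static int bits_needed(unsigned v)
{
    return av_log2(v) + 1;
}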
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:228
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:279
Vp3Fragment
Definition: vp3.c:67
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
Definition: refstruct.c:120
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:218
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:308
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:210
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:280
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:236