vp3.c
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <string.h>
36 
37 #include "libavutil/imgutils.h"
38 
39 #include "avcodec.h"
40 #include "get_bits.h"
41 #include "hpeldsp.h"
42 #include "internal.h"
43 #include "mathops.h"
44 #include "thread.h"
45 #include "videodsp.h"
46 #include "vp3data.h"
47 #include "vp4data.h"
48 #include "vp3dsp.h"
49 #include "xiph.h"
50 
51 #define FRAGMENT_PIXELS 8
52 
53 // FIXME split things out into their own arrays
54 typedef struct Vp3Fragment {
55  int16_t dc;
56  uint8_t coding_method;
57  uint8_t qpi;
58 } Vp3Fragment;
59 
60 #define SB_NOT_CODED 0
61 #define SB_PARTIALLY_CODED 1
62 #define SB_FULLY_CODED 2
63 
64 // This is the maximum length of a single long bit run that can be encoded
65 // for superblock coding or block qps. Theora special-cases this to read a
66 // bit instead of flipping the current bit to allow for runs longer than 4129.
67 #define MAXIMUM_LONG_BIT_RUN 4129
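/* Worked example (illustrative, derived from unpack_superblocks() below):
 * the run-length VLC decodes to at most 33, +1 gives 34, and a decoded
 * value of 34 appends 12 extra bits, so the longest run is
 * 34 + 4095 = 4129. */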
68 
69 #define MODE_INTER_NO_MV 0
70 #define MODE_INTRA 1
71 #define MODE_INTER_PLUS_MV 2
72 #define MODE_INTER_LAST_MV 3
73 #define MODE_INTER_PRIOR_LAST 4
74 #define MODE_USING_GOLDEN 5
75 #define MODE_GOLDEN_MV 6
76 #define MODE_INTER_FOURMV 7
77 #define CODING_MODE_COUNT 8
78 
79 /* special internal mode */
80 #define MODE_COPY 8
81 
82 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
83 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
84 
85 
86 /* There are 6 preset schemes, plus a free-form scheme */
87 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
88  /* scheme 1: Last motion vector dominates */
93 
94  /* scheme 2 */
98  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
99 
100  /* scheme 3 */
104  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
105 
106  /* scheme 4 */
110  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
111 
112  /* scheme 5: No motion vector dominates */
116  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
117 
118  /* scheme 6 */
122  MODE_GOLDEN_MV, MODE_INTER_FOURMV },
123 };
124 
125 static const uint8_t hilbert_offset[16][2] = {
126  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
127  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
128  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
129  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
130 };
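/* Illustration (not in the original source): the table above visits the
 * 4x4 fragment grid of a superblock in the following order, where each
 * value is the index i at position (x, y):
 *
 *      x:  0   1   2   3
 *   y=0:   0   1  14  15
 *   y=1:   3   2  13  12
 *   y=2:   4   7   8  11
 *   y=3:   5   6   9  10
 */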
131 
132 enum {
138 };
139 
140 static const uint8_t vp4_pred_block_type_map[8] = {
149 };
150 
151 typedef struct {
152  int dc;
153  int type;
154 } VP4Predictor;
155 
156 #define MIN_DEQUANT_VAL 2
157 
158 typedef struct HuffEntry {
160 } HuffEntry;
161 
162 typedef struct HuffTable {
163  HuffEntry entries[32];
165 } HuffTable;
166 
167 typedef struct Vp3DecodeContext {
170  int version;
171  int width, height;
172  int chroma_x_shift, chroma_y_shift;
176  int keyframe;
177  uint8_t idct_permutation[64];
178  uint8_t idct_scantable[64];
182  DECLARE_ALIGNED(16, int16_t, block)[64];
186 
187  int qps[3];
188  int nqps;
189  int last_qps[3];
190 
200  unsigned char *superblock_coding;
201 
202  int macroblock_count; /* y macroblock count */
208  int yuv_macroblock_count; /* y+u+v macroblock count */
209 
211  int fragment_width[2];
212  int fragment_height[2];
213 
215  int fragment_start[3];
216  int data_offset[3];
220 
221  int8_t (*motion_val[2])[2];
222 
223  /* tables */
224  uint16_t coded_dc_scale_factor[2][64];
225  uint32_t coded_ac_scale_factor[64];
226  uint8_t base_matrix[384][64];
227  uint8_t qr_count[2][3];
228  uint8_t qr_size[2][3][64];
229  uint16_t qr_base[2][3][64];
230 
231  /**
232  * This is a list of all tokens in bitstream order. Reordering takes place
233  * by pulling from each level during IDCT. As a consequence, IDCT must be
234  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
235  * otherwise. The 32 different tokens with up to 12 bits of extradata are
236  * collapsed into 3 types, packed as follows:
237  * (from the low to high bits)
238  *
239  * 2 bits: type (0,1,2)
240  * 0: EOB run, 14 bits for run length (12 needed)
241  * 1: zero run, 7 bits for run length
242  * 7 bits for the next coefficient (3 needed)
243  * 2: coefficient, 14 bits (11 needed)
244  *
245  * Coefficients are signed, so are packed in the highest bits for automatic
246  * sign extension.
247  */
248  int16_t *dct_tokens[3][64];
249  int16_t *dct_tokens_base;
250 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
251 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
252 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
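/* Packing sketch for the macros above (illustrative): for instance
 * TOKEN_ZERO_RUN(coeff, zero_run) stores coeff * 512 + (zero_run << 2) + 1,
 * so vp3_dequant() below recovers the run as (token >> 2) & 0x7f and the
 * signed coefficient as token >> 9. */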
253 
254  /**
255  * number of blocks that contain DCT coefficients at
256  * the given level or higher
257  */
258  int num_coded_frags[3][64];
260 
261  /* this is a list of indexes into the all_fragments array indicating
262  * which of the fragments are coded */
263  int *coded_fragment_list[3];
264 
267  int num_kf_coded_fragment[3];
268 
269  /* The first 16 of the following VLCs are for the dc coefficients;
270  the others are four groups of 16 VLCs each for ac coefficients. */
271  VLC coeff_vlc[5 * 16];
272 
273  VLC superblock_run_length_vlc; /* version < 2 */
274  VLC fragment_run_length_vlc; /* version < 2 */
275  VLC block_pattern_vlc[2]; /* version >= 2*/
277  VLC motion_vector_vlc; /* version < 2 */
278  VLC vp4_mv_vlc[2][7]; /* version >=2 */
279 
280  /* these arrays need to be on 16-byte boundaries since SSE2 operations
281  * index into them */
282  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
283 
284  /* This table contains superblock_count * 16 entries. Each set of 16
285  * numbers corresponds to the fragment indexes 0..15 of the superblock.
286  * An entry will be -1 to indicate that no entry corresponds to that
287  * index. */
289 
290  /* This is an array that indicates how a particular macroblock
291  * is coded. */
292  unsigned char *macroblock_coding;
293 
295 
296  /* Huffman decode */
298 
299  uint8_t filter_limit_values[64];
300  DECLARE_ALIGNED(8, int, bounding_values_array)[256 + 2];
301 
302  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
304 
305 /************************************************************************
306  * VP3 specific functions
307  ************************************************************************/
308 
309 static av_cold void free_tables(AVCodecContext *avctx)
310 {
311  Vp3DecodeContext *s = avctx->priv_data;
312 
314  av_freep(&s->all_fragments);
320  av_freep(&s->dc_pred_row);
321  av_freep(&s->motion_val[0]);
322  av_freep(&s->motion_val[1]);
323 }
324 
325 static void vp3_decode_flush(AVCodecContext *avctx)
326 {
327  Vp3DecodeContext *s = avctx->priv_data;
328 
329  if (s->golden_frame.f)
330  ff_thread_release_buffer(avctx, &s->golden_frame);
331  if (s->last_frame.f)
332  ff_thread_release_buffer(avctx, &s->last_frame);
333  if (s->current_frame.f)
334  ff_thread_release_buffer(avctx, &s->current_frame);
335 }
336 
337 static av_cold int vp3_decode_end(AVCodecContext *avctx)
338 {
339  Vp3DecodeContext *s = avctx->priv_data;
340  int i, j;
341 
342  free_tables(avctx);
344 
345  s->theora_tables = 0;
346 
347  /* release all frames */
348  vp3_decode_flush(avctx);
352 
353  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++)
354  ff_free_vlc(&s->coeff_vlc[i]);
355 
360 
361  for (j = 0; j < 2; j++)
362  for (i = 0; i < 7; i++)
363  ff_free_vlc(&s->vp4_mv_vlc[j][i]);
364 
365  for (i = 0; i < 2; i++)
366  ff_free_vlc(&s->block_pattern_vlc[i]);
367  return 0;
368 }
369 
370 /**
371  * This function sets up all of the various blocks mappings:
372  * superblocks <-> fragments, macroblocks <-> fragments,
373  * superblocks <-> macroblocks
374  *
375  * @return 0 if successful; returns 1 if *anything* went wrong.
376  */
377 static int init_block_mapping(Vp3DecodeContext *s)
378 {
379  int sb_x, sb_y, plane;
380  int x, y, i, j = 0;
381 
382  for (plane = 0; plane < 3; plane++) {
383  int sb_width = plane ? s->c_superblock_width
384  : s->y_superblock_width;
385  int sb_height = plane ? s->c_superblock_height
386  : s->y_superblock_height;
387  int frag_width = s->fragment_width[!!plane];
388  int frag_height = s->fragment_height[!!plane];
389 
390  for (sb_y = 0; sb_y < sb_height; sb_y++)
391  for (sb_x = 0; sb_x < sb_width; sb_x++)
392  for (i = 0; i < 16; i++) {
393  x = 4 * sb_x + hilbert_offset[i][0];
394  y = 4 * sb_y + hilbert_offset[i][1];
395 
396  if (x < frag_width && y < frag_height)
397  s->superblock_fragments[j++] = s->fragment_start[plane] +
398  y * frag_width + x;
399  else
400  s->superblock_fragments[j++] = -1;
401  }
402  }
403 
404  return 0; /* successful path out */
405 }
406 
407 /*
408  * This function sets up the dequantization tables used for a particular
409  * frame.
410  */
411 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
412 {
413  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
414  int i, plane, inter, qri, bmi, bmj, qistart;
415 
416  for (inter = 0; inter < 2; inter++) {
417  for (plane = 0; plane < 3; plane++) {
418  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
419  int sum = 0;
420  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
421  sum += s->qr_size[inter][plane][qri];
422  if (s->qps[qpi] <= sum)
423  break;
424  }
425  qistart = sum - s->qr_size[inter][plane][qri];
426  bmi = s->qr_base[inter][plane][qri];
427  bmj = s->qr_base[inter][plane][qri + 1];
428  for (i = 0; i < 64; i++) {
429  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
430  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
431  s->qr_size[inter][plane][qri]) /
432  (2 * s->qr_size[inter][plane][qri]);
433 
434  int qmin = 8 << (inter + !i);
435  int qscale = i ? ac_scale_factor : dc_scale_factor;
436  int qbias = (1 + inter) * 3;
437  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
438  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
439  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
440  }
441  /* all DC coefficients use the same quant so as not to interfere
442  * with DC prediction */
443  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
444  }
445  }
446 }
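/* Worked note on the function above (illustrative): the base matrices are
 * interpolated piecewise-linearly over the quality index. With
 * qistart = sum - qr_size and qistart <= qps[qpi] <= sum, coeff is
 * ((sum - qps) * base_matrix[bmi][i] + (qps - qistart) * base_matrix[bmj][i])
 * divided by qr_size; the extra +qr_size in the numerator against the
 * 2 * qr_size divisor rounds the result to nearest. */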
447 
448 /*
449  * This function initializes the loop filter boundary limits if the frame's
450  * quality index is different from the previous frame's.
451  *
452  * The filter_limit_values may not be larger than 127.
453  */
454 static void init_loop_filter(Vp3DecodeContext *s)
455 {
456  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
457 }
458 
459 /*
460  * This function unpacks all of the superblock/macroblock/fragment coding
461  * information from the bitstream.
462  */
463 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
464 {
465  int superblock_starts[3] = {
466  0, s->u_superblock_start, s->v_superblock_start
467  };
468  int bit = 0;
469  int current_superblock = 0;
470  int current_run = 0;
471  int num_partial_superblocks = 0;
472 
473  int i, j;
474  int current_fragment;
475  int plane;
476  int plane0_num_coded_frags = 0;
477 
478  if (s->keyframe) {
479  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
480  } else {
481  /* unpack the list of partially-coded superblocks */
482  bit = get_bits1(gb) ^ 1;
483  current_run = 0;
484 
485  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
486  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
487  bit = get_bits1(gb);
488  else
489  bit ^= 1;
490 
491  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
492  6, 2) + 1;
493  if (current_run == 34)
494  current_run += get_bits(gb, 12);
495 
496  if (current_run > s->superblock_count - current_superblock) {
497  av_log(s->avctx, AV_LOG_ERROR,
498  "Invalid partially coded superblock run length\n");
499  return -1;
500  }
501 
502  memset(s->superblock_coding + current_superblock, bit, current_run);
503 
504  current_superblock += current_run;
505  if (bit)
506  num_partial_superblocks += current_run;
507  }
508 
509  /* unpack the list of fully coded superblocks if any of the blocks were
510  * not marked as partially coded in the previous step */
511  if (num_partial_superblocks < s->superblock_count) {
512  int superblocks_decoded = 0;
513 
514  current_superblock = 0;
515  bit = get_bits1(gb) ^ 1;
516  current_run = 0;
517 
518  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
519  get_bits_left(gb) > 0) {
520  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
521  bit = get_bits1(gb);
522  else
523  bit ^= 1;
524 
525  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
526  6, 2) + 1;
527  if (current_run == 34)
528  current_run += get_bits(gb, 12);
529 
530  for (j = 0; j < current_run; current_superblock++) {
531  if (current_superblock >= s->superblock_count) {
532  av_log(s->avctx, AV_LOG_ERROR,
533  "Invalid fully coded superblock run length\n");
534  return -1;
535  }
536 
537  /* skip any superblocks already marked as partially coded */
538  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
539  s->superblock_coding[current_superblock] = 2 * bit;
540  j++;
541  }
542  }
543  superblocks_decoded += current_run;
544  }
545  }
546 
547  /* if there were partial blocks, initialize bitstream for
548  * unpacking fragment codings */
549  if (num_partial_superblocks) {
550  current_run = 0;
551  bit = get_bits1(gb);
552  /* toggle the bit because as soon as the first run length is
553  * fetched the bit will be toggled again */
554  bit ^= 1;
555  }
556  }
557 
558  /* figure out which fragments are coded; iterate through each
559  * superblock (all planes) */
560  s->total_num_coded_frags = 0;
562 
565 
566  for (plane = 0; plane < 3; plane++) {
567  int sb_start = superblock_starts[plane];
568  int sb_end = sb_start + (plane ? s->c_superblock_count
569  : s->y_superblock_count);
570  int num_coded_frags = 0;
571 
572  if (s->keyframe) {
573  if (s->num_kf_coded_fragment[plane] == -1) {
574  for (i = sb_start; i < sb_end; i++) {
575  /* iterate through all 16 fragments in a superblock */
576  for (j = 0; j < 16; j++) {
577  /* if the fragment is in bounds, check its coding status */
578  current_fragment = s->superblock_fragments[i * 16 + j];
579  if (current_fragment != -1) {
580  s->coded_fragment_list[plane][num_coded_frags++] =
581  current_fragment;
582  }
583  }
584  }
585  s->num_kf_coded_fragment[plane] = num_coded_frags;
586  } else
587  num_coded_frags = s->num_kf_coded_fragment[plane];
588  } else {
589  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
590  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
591  return AVERROR_INVALIDDATA;
592  }
593  /* iterate through all 16 fragments in a superblock */
594  for (j = 0; j < 16; j++) {
595  /* if the fragment is in bounds, check its coding status */
596  current_fragment = s->superblock_fragments[i * 16 + j];
597  if (current_fragment != -1) {
598  int coded = s->superblock_coding[i];
599 
600  if (coded == SB_PARTIALLY_CODED) {
601  /* fragment may or may not be coded; this is the case
602  * that cares about the fragment coding runs */
603  if (current_run-- == 0) {
604  bit ^= 1;
605  current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
606  }
607  coded = bit;
608  }
609 
610  if (coded) {
611  /* default mode; actual mode will be decoded in
612  * the next phase */
613  s->all_fragments[current_fragment].coding_method =
614  MODE_INTER_NO_MV;
615  s->coded_fragment_list[plane][num_coded_frags++] =
616  current_fragment;
617  } else {
618  /* not coded; copy this fragment from the prior frame */
619  s->all_fragments[current_fragment].coding_method =
620  MODE_COPY;
621  }
622  }
623  }
624  }
625  }
626  if (!plane)
627  plane0_num_coded_frags = num_coded_frags;
628  s->total_num_coded_frags += num_coded_frags;
629  for (i = 0; i < 64; i++)
630  s->num_coded_frags[plane][i] = num_coded_frags;
631  if (plane < 2)
632  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
633  num_coded_frags;
634  }
635  return 0;
636 }
637 
638 #define BLOCK_X (2 * mb_x + (k & 1))
639 #define BLOCK_Y (2 * mb_y + (k >> 1))
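/* For k = 0..3 the macros above enumerate the four luma fragments of
 * macroblock (mb_x, mb_y) in raster order (illustrative):
 *   k=0 -> (2*mb_x,   2*mb_y)      k=1 -> (2*mb_x+1, 2*mb_y)
 *   k=2 -> (2*mb_x,   2*mb_y+1)    k=3 -> (2*mb_x+1, 2*mb_y+1) */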
640 
641 #if CONFIG_VP4_DECODER
642 /**
643  * @return number of blocks, or > yuv_macroblock_count on error.
644  * The return value is always >= 1.
645  */
646 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
647 {
648  int v = 1;
649  int bits;
650  while ((bits = show_bits(gb, 9)) == 0x1ff) {
651  skip_bits(gb, 9);
652  v += 256;
653  if (v > s->yuv_macroblock_count) {
654  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
655  return v;
656  }
657  }
658 #define body(n) { \
659  skip_bits(gb, 2 + n); \
660  v += (1 << n) + get_bits(gb, n); }
661 #define thresh(n) (0x200 - (0x80 >> n))
662 #define else_if(n) else if (bits < thresh(n)) body(n)
663  if (bits < 0x100) {
664  skip_bits(gb, 1);
665  } else if (bits < thresh(0)) {
666  skip_bits(gb, 2);
667  v += 1;
668  }
669  else_if(1)
670  else_if(2)
671  else_if(3)
672  else_if(4)
673  else_if(5)
674  else_if(6)
675  else body(7)
676 #undef body
677 #undef thresh
678 #undef else_if
679  return v;
680 }
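/* Illustrative reading of the coding above: starting from v = 1, a 9-bit
 * window of all ones (0x1ff) adds 256 and keeps the loop going; otherwise
 * a prefix of k ones followed by a zero (k >= 2) selects body(k - 1),
 * consuming the prefix plus k - 1 suffix bits and adding
 * (1 << (k - 1)) + suffix; the prefix "10" adds 1, and a leading 0 bit
 * adds nothing. */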
681 
682 static int vp4_get_block_pattern(Vp3DecodeContext *s, GetBitContext *gb, int *next_block_pattern_table)
683 {
684  int v = get_vlc2(gb, s->block_pattern_vlc[*next_block_pattern_table].table, 3, 2);
685  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
686  return v + 1;
687 }
688 
689 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
690 {
691  int plane, i, j, k, fragment;
692  int next_block_pattern_table;
693  int bit, current_run, has_partial;
694 
696 
697  if (s->keyframe)
698  return 0;
699 
700  has_partial = 0;
701  bit = get_bits1(gb);
702  for (i = 0; i < s->yuv_macroblock_count; i += current_run) {
703  if (get_bits_left(gb) <= 0)
704  return AVERROR_INVALIDDATA;
705  current_run = vp4_get_mb_count(s, gb);
706  if (current_run > s->yuv_macroblock_count - i)
707  return -1;
708  memset(s->superblock_coding + i, 2 * bit, current_run);
709  bit ^= 1;
710  has_partial |= bit;
711  }
712 
713  if (has_partial) {
714  if (get_bits_left(gb) <= 0)
715  return AVERROR_INVALIDDATA;
716  bit = get_bits1(gb);
717  current_run = vp4_get_mb_count(s, gb);
718  for (i = 0; i < s->yuv_macroblock_count; i++) {
719  if (!s->superblock_coding[i]) {
720  if (!current_run) {
721  bit ^= 1;
722  current_run = vp4_get_mb_count(s, gb);
723  }
724  s->superblock_coding[i] = bit;
725  current_run--;
726  }
727  }
728  if (current_run) /* handle situation when vp4_get_mb_count() fails */
729  return -1;
730  }
731 
732  next_block_pattern_table = 0;
733  i = 0;
734  for (plane = 0; plane < 3; plane++) {
735  int sb_x, sb_y;
736  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
737  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
738  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
739  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
740  int fragment_width = s->fragment_width[!!plane];
741  int fragment_height = s->fragment_height[!!plane];
742 
743  for (sb_y = 0; sb_y < sb_height; sb_y++) {
744  for (sb_x = 0; sb_x < sb_width; sb_x++) {
745  for (j = 0; j < 4; j++) {
746  int mb_x = 2 * sb_x + (j >> 1);
747  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
748  int mb_coded, pattern, coded;
749 
750  if (mb_x >= mb_width || mb_y >= mb_height)
751  continue;
752 
753  mb_coded = s->superblock_coding[i++];
754 
755  if (mb_coded == SB_FULLY_CODED)
756  pattern = 0xF;
757  else if (mb_coded == SB_PARTIALLY_CODED)
758  pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
759  else
760  pattern = 0;
761 
762  for (k = 0; k < 4; k++) {
763  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
764  continue;
765  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
766  coded = pattern & (8 >> k);
767  /* MODE_INTER_NO_MV is the default for coded fragments.
768  The actual method is decoded in the next phase. */
769  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
770  }
771  }
772  }
773  }
774  }
775  return 0;
776 }
777 #endif
778 
779 /*
780  * This function unpacks all the coding mode data for individual macroblocks
781  * from the bitstream.
782  */
783 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
784 {
785  int i, j, k, sb_x, sb_y;
786  int scheme;
787  int current_macroblock;
788  int current_fragment;
789  int coding_mode;
790  int custom_mode_alphabet[CODING_MODE_COUNT];
791  const int *alphabet;
792  Vp3Fragment *frag;
793 
794  if (s->keyframe) {
795  for (i = 0; i < s->fragment_count; i++)
796  s->all_fragments[i].coding_method = MODE_INTRA;
797  } else {
798  /* fetch the mode coding scheme for this frame */
799  scheme = get_bits(gb, 3);
800 
801  /* is it a custom coding scheme? */
802  if (scheme == 0) {
803  for (i = 0; i < 8; i++)
804  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
805  for (i = 0; i < 8; i++)
806  custom_mode_alphabet[get_bits(gb, 3)] = i;
807  alphabet = custom_mode_alphabet;
808  } else
809  alphabet = ModeAlphabet[scheme - 1];
810 
811  /* iterate through all of the macroblocks that contain 1 or more
812  * coded fragments */
813  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
814  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
815  if (get_bits_left(gb) <= 0)
816  return -1;
817 
818  for (j = 0; j < 4; j++) {
819  int mb_x = 2 * sb_x + (j >> 1);
820  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
821  current_macroblock = mb_y * s->macroblock_width + mb_x;
822 
823  if (mb_x >= s->macroblock_width ||
824  mb_y >= s->macroblock_height)
825  continue;
826 
827  /* coding modes are only stored if the macroblock has
828  * at least one luma block coded, otherwise it must be
829  * INTER_NO_MV */
830  for (k = 0; k < 4; k++) {
831  current_fragment = BLOCK_Y *
832  s->fragment_width[0] + BLOCK_X;
833  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
834  break;
835  }
836  if (k == 4) {
837  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
838  continue;
839  }
840 
841  /* mode 7 means get 3 bits for each coding mode */
842  if (scheme == 7)
843  coding_mode = get_bits(gb, 3);
844  else
845  coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
846 
847  s->macroblock_coding[current_macroblock] = coding_mode;
848  for (k = 0; k < 4; k++) {
849  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
850  if (frag->coding_method != MODE_COPY)
851  frag->coding_method = coding_mode;
852  }
853 
854 #define SET_CHROMA_MODES \
855  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
856  frag[s->fragment_start[1]].coding_method = coding_mode; \
857  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
858  frag[s->fragment_start[2]].coding_method = coding_mode;
859 
860  if (s->chroma_y_shift) {
861  frag = s->all_fragments + mb_y *
862  s->fragment_width[1] + mb_x;
863  SET_CHROMA_MODES
864  } else if (s->chroma_x_shift) {
865  frag = s->all_fragments +
866  2 * mb_y * s->fragment_width[1] + mb_x;
867  for (k = 0; k < 2; k++) {
868  SET_CHROMA_MODES
869  frag += s->fragment_width[1];
870  }
871  } else {
872  for (k = 0; k < 4; k++) {
873  frag = s->all_fragments +
874  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
875  SET_CHROMA_MODES
876  }
877  }
878  }
879  }
880  }
881  }
882 
883  return 0;
884 }
885 
886 static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
887 {
888  int v = get_vlc2(gb, s->vp4_mv_vlc[axis][vp4_mv_table_selector[FFABS(last_motion)]].table, 6, 2) - 31;
889  return last_motion < 0 ? -v : v;
890 }
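/* Note on the helper above (illustrative): the per-axis VLC table is
 * selected by the magnitude of the previous vector component via
 * vp4_mv_table_selector[], the decoded symbol minus 31 gives a value
 * nominally in [-31, 31], and the sign is mirrored when the previous
 * component was negative. */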
891 
892 /*
893  * This function unpacks all the motion vectors for the individual
894  * macroblocks from the bitstream.
895  */
896 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
897 {
898  int j, k, sb_x, sb_y;
899  int coding_mode;
900  int motion_x[4];
901  int motion_y[4];
902  int last_motion_x = 0;
903  int last_motion_y = 0;
904  int prior_last_motion_x = 0;
905  int prior_last_motion_y = 0;
906  int last_gold_motion_x = 0;
907  int last_gold_motion_y = 0;
908  int current_macroblock;
909  int current_fragment;
910  int frag;
911 
912  if (s->keyframe)
913  return 0;
914 
915  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
916  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
917 
918  /* iterate through all of the macroblocks that contain 1 or more
919  * coded fragments */
920  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
921  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
922  if (get_bits_left(gb) <= 0)
923  return -1;
924 
925  for (j = 0; j < 4; j++) {
926  int mb_x = 2 * sb_x + (j >> 1);
927  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
928  current_macroblock = mb_y * s->macroblock_width + mb_x;
929 
930  if (mb_x >= s->macroblock_width ||
931  mb_y >= s->macroblock_height ||
932  s->macroblock_coding[current_macroblock] == MODE_COPY)
933  continue;
934 
935  switch (s->macroblock_coding[current_macroblock]) {
936  case MODE_GOLDEN_MV:
937  if (coding_mode == 2) { /* VP4 */
938  last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
939  last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
940  break;
941  } /* otherwise fall through */
942  case MODE_INTER_PLUS_MV:
943  /* all 6 fragments use the same motion vector */
944  if (coding_mode == 0) {
945  motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
946  motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
947  } else if (coding_mode == 1) {
948  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
949  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
950  } else { /* VP4 */
951  motion_x[0] = vp4_get_mv(s, gb, 0, last_motion_x);
952  motion_y[0] = vp4_get_mv(s, gb, 1, last_motion_y);
953  }
954 
955  /* vector maintenance, only on MODE_INTER_PLUS_MV */
956  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
957  prior_last_motion_x = last_motion_x;
958  prior_last_motion_y = last_motion_y;
959  last_motion_x = motion_x[0];
960  last_motion_y = motion_y[0];
961  }
962  break;
963 
964  case MODE_INTER_FOURMV:
965  /* vector maintenance */
966  prior_last_motion_x = last_motion_x;
967  prior_last_motion_y = last_motion_y;
968 
969  /* fetch 4 vectors from the bitstream, one for each
970  * Y fragment, then average for the C fragment vectors */
971  for (k = 0; k < 4; k++) {
972  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
973  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
974  if (coding_mode == 0) {
975  motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
976  motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
977  } else if (coding_mode == 1) {
978  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
979  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
980  } else { /* VP4 */
981  motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
982  motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
983  }
984  last_motion_x = motion_x[k];
985  last_motion_y = motion_y[k];
986  } else {
987  motion_x[k] = 0;
988  motion_y[k] = 0;
989  }
990  }
991  break;
992 
993  case MODE_INTER_LAST_MV:
994  /* all 6 fragments use the last motion vector */
995  motion_x[0] = last_motion_x;
996  motion_y[0] = last_motion_y;
997 
998  /* no vector maintenance (last vector remains the
999  * last vector) */
1000  break;
1001 
1002  case MODE_INTER_PRIOR_LAST:
1003  /* all 6 fragments use the motion vector prior to the
1004  * last motion vector */
1005  motion_x[0] = prior_last_motion_x;
1006  motion_y[0] = prior_last_motion_y;
1007 
1008  /* vector maintenance */
1009  prior_last_motion_x = last_motion_x;
1010  prior_last_motion_y = last_motion_y;
1011  last_motion_x = motion_x[0];
1012  last_motion_y = motion_y[0];
1013  break;
1014 
1015  default:
1016  /* covers intra, inter without MV, golden without MV */
1017  motion_x[0] = 0;
1018  motion_y[0] = 0;
1019 
1020  /* no vector maintenance */
1021  break;
1022  }
1023 
1024  /* assign the motion vectors to the correct fragments */
1025  for (k = 0; k < 4; k++) {
1026  current_fragment =
1027  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1028  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1029  s->motion_val[0][current_fragment][0] = motion_x[k];
1030  s->motion_val[0][current_fragment][1] = motion_y[k];
1031  } else {
1032  s->motion_val[0][current_fragment][0] = motion_x[0];
1033  s->motion_val[0][current_fragment][1] = motion_y[0];
1034  }
1035  }
1036 
1037  if (s->chroma_y_shift) {
1038  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1039  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1040  motion_x[2] + motion_x[3], 2);
1041  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1042  motion_y[2] + motion_y[3], 2);
1043  }
1044  if (s->version <= 2) {
1045  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1046  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1047  }
1048  frag = mb_y * s->fragment_width[1] + mb_x;
1049  s->motion_val[1][frag][0] = motion_x[0];
1050  s->motion_val[1][frag][1] = motion_y[0];
1051  } else if (s->chroma_x_shift) {
1052  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1053  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1054  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1055  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1056  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1057  } else {
1058  motion_x[1] = motion_x[0];
1059  motion_y[1] = motion_y[0];
1060  }
1061  if (s->version <= 2) {
1062  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1063  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1064  }
1065  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1066  for (k = 0; k < 2; k++) {
1067  s->motion_val[1][frag][0] = motion_x[k];
1068  s->motion_val[1][frag][1] = motion_y[k];
1069  frag += s->fragment_width[1];
1070  }
1071  } else {
1072  for (k = 0; k < 4; k++) {
1073  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1074  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1075  s->motion_val[1][frag][0] = motion_x[k];
1076  s->motion_val[1][frag][1] = motion_y[k];
1077  } else {
1078  s->motion_val[1][frag][0] = motion_x[0];
1079  s->motion_val[1][frag][1] = motion_y[0];
1080  }
1081  }
1082  }
1083  }
1084  }
1085  }
1086 
1087  return 0;
1088 }
1089 
1090 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
1091 {
1092  int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
1093  int num_blocks = s->total_num_coded_frags;
1094 
1095  for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1096  i = blocks_decoded = num_blocks_at_qpi = 0;
1097 
1098  bit = get_bits1(gb) ^ 1;
1099  run_length = 0;
1100 
1101  do {
1102  if (run_length == MAXIMUM_LONG_BIT_RUN)
1103  bit = get_bits1(gb);
1104  else
1105  bit ^= 1;
1106 
1107  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
1108  if (run_length == 34)
1109  run_length += get_bits(gb, 12);
1110  blocks_decoded += run_length;
1111 
1112  if (!bit)
1113  num_blocks_at_qpi += run_length;
1114 
1115  for (j = 0; j < run_length; i++) {
1116  if (i >= s->total_num_coded_frags)
1117  return -1;
1118 
1119  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1120  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1121  j++;
1122  }
1123  }
1124  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1125 
1126  num_blocks -= num_blocks_at_qpi;
1127  }
1128 
1129  return 0;
1130 }
1131 
1132 static inline int get_eob_run(GetBitContext *gb, int token)
1133 {
1134  int v = eob_run_table[token].base;
1135  if (eob_run_table[token].bits)
1136  v += get_bits(gb, eob_run_table[token].bits);
1137  return v;
1138 }
1139 
1140 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1141 {
1142  int bits_to_get, zero_run;
1143 
1144  bits_to_get = coeff_get_bits[token];
1145  if (bits_to_get)
1146  bits_to_get = get_bits(gb, bits_to_get);
1147  *coeff = coeff_tables[token][bits_to_get];
1148 
1149  zero_run = zero_run_base[token];
1150  if (zero_run_get_bits[token])
1151  zero_run += get_bits(gb, zero_run_get_bits[token]);
1152 
1153  return zero_run;
1154 }
1155 
1156 /*
1157  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1158  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1159  * data. This function unpacks all the VLCs for either the Y plane or both
1160  * C planes, and is called for DC coefficients or different AC coefficient
1161  * levels (since different coefficient types require different VLC tables.
1162  * levels (since different coefficient types require different VLC tables).
1163  * This function returns a residual eob run. E.g., if a particular token gave
1164  * instructions to EOB the next 5 fragments and there were only 2 fragments
1165  * left in the current fragment range, 3 would be returned so that it could
1166  * be passed into the next call to this same function.
1167  */
1168 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1169  VLC *table, int coeff_index,
1170  int plane,
1171  int eob_run)
1172 {
1173  int i, j = 0;
1174  int token;
1175  int zero_run = 0;
1176  int16_t coeff = 0;
1177  int blocks_ended;
1178  int coeff_i = 0;
1179  int num_coeffs = s->num_coded_frags[plane][coeff_index];
1180  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1181 
1182  /* local references to structure members to avoid repeated dereferences */
1183  int *coded_fragment_list = s->coded_fragment_list[plane];
1184  Vp3Fragment *all_fragments = s->all_fragments;
1185  VLC_TYPE(*vlc_table)[2] = table->table;
1186 
1187  if (num_coeffs < 0) {
1188  av_log(s->avctx, AV_LOG_ERROR,
1189  "Invalid number of coefficients at level %d\n", coeff_index);
1190  return AVERROR_INVALIDDATA;
1191  }
1192 
1193  if (eob_run > num_coeffs) {
1194  coeff_i =
1195  blocks_ended = num_coeffs;
1196  eob_run -= num_coeffs;
1197  } else {
1198  coeff_i =
1199  blocks_ended = eob_run;
1200  eob_run = 0;
1201  }
1202 
1203  // insert fake EOB token to cover the split between planes or zzi
1204  if (blocks_ended)
1205  dct_tokens[j++] = blocks_ended << 2;
1206 
1207  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
1208  /* decode a VLC into a token */
1209  token = get_vlc2(gb, vlc_table, 11, 3);
1210  /* use the token to get a zero run, a coefficient, and an eob run */
1211  if ((unsigned) token <= 6U) {
1212  eob_run = get_eob_run(gb, token);
1213  if (!eob_run)
1214  eob_run = INT_MAX;
1215 
1216  // record only the number of blocks ended in this plane,
1217  // any spill will be recorded in the next plane.
1218  if (eob_run > num_coeffs - coeff_i) {
1219  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1220  blocks_ended += num_coeffs - coeff_i;
1221  eob_run -= num_coeffs - coeff_i;
1222  coeff_i = num_coeffs;
1223  } else {
1224  dct_tokens[j++] = TOKEN_EOB(eob_run);
1225  blocks_ended += eob_run;
1226  coeff_i += eob_run;
1227  eob_run = 0;
1228  }
1229  } else if (token >= 0) {
1230  zero_run = get_coeff(gb, token, &coeff);
1231 
1232  if (zero_run) {
1233  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
1234  } else {
1235  // Save DC into the fragment structure. DC prediction is
1236  // done in raster order, so the actual DC can't be in with
1237  // other tokens. We still need the token in dct_tokens[]
1238  // however, or else the structure collapses on itself.
1239  if (!coeff_index)
1240  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1241 
1242  dct_tokens[j++] = TOKEN_COEFF(coeff);
1243  }
1244 
1245  if (coeff_index + zero_run > 64) {
1246  av_log(s->avctx, AV_LOG_ERROR,
1247  "Invalid zero run of %d with %d coeffs left\n",
1248  zero_run, 64 - coeff_index);
1249  zero_run = 64 - coeff_index;
1250  }
1251 
1252  // zero runs code multiple coefficients,
1253  // so don't try to decode coeffs for those higher levels
1254  for (i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1255  s->num_coded_frags[plane][i]--;
1256  coeff_i++;
1257  } else {
1258  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1259  return -1;
1260  }
1261  }
1262 
1263  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1264  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1265 
1266  // decrement the number of blocks that have higher coefficients for each
1267  // EOB run at this level
1268  if (blocks_ended)
1269  for (i = coeff_index + 1; i < 64; i++)
1270  s->num_coded_frags[plane][i] -= blocks_ended;
1271 
1272  // setup the next buffer
1273  if (plane < 2)
1274  s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1275  else if (coeff_index < 63)
1276  s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1277 
1278  return eob_run;
1279 }
1280 
1281 static void reverse_dc_prediction(Vp3DecodeContext *s,
1282  int first_fragment,
1283  int fragment_width,
1284  int fragment_height);
1285 /*
1286  * This function unpacks all of the DCT coefficient data from the
1287  * bitstream.
1288  */
1289 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1290 {
1291  int i;
1292  int dc_y_table;
1293  int dc_c_table;
1294  int ac_y_table;
1295  int ac_c_table;
1296  int residual_eob_run = 0;
1297  VLC *y_tables[64];
1298  VLC *c_tables[64];
1299 
1300  s->dct_tokens[0][0] = s->dct_tokens_base;
1301 
1302  if (get_bits_left(gb) < 16)
1303  return AVERROR_INVALIDDATA;
1304 
1305  /* fetch the DC table indexes */
1306  dc_y_table = get_bits(gb, 4);
1307  dc_c_table = get_bits(gb, 4);
1308 
1309  /* unpack the Y plane DC coefficients */
1310  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_y_table], 0,
1311  0, residual_eob_run);
1312  if (residual_eob_run < 0)
1313  return residual_eob_run;
1314  if (get_bits_left(gb) < 8)
1315  return AVERROR_INVALIDDATA;
1316 
1317  /* reverse prediction of the Y-plane DC coefficients */
1318  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1319 
1320  /* unpack the C plane DC coefficients */
1321  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1322  1, residual_eob_run);
1323  if (residual_eob_run < 0)
1324  return residual_eob_run;
1325  residual_eob_run = unpack_vlcs(s, gb, &s->coeff_vlc[dc_c_table], 0,
1326  2, residual_eob_run);
1327  if (residual_eob_run < 0)
1328  return residual_eob_run;
1329 
1330  /* reverse prediction of the C-plane DC coefficients */
1331  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1332  reverse_dc_prediction(s, s->fragment_start[1],
1333  s->fragment_width[1], s->fragment_height[1]);
1334  reverse_dc_prediction(s, s->fragment_start[2],
1335  s->fragment_width[1], s->fragment_height[1]);
1336  }
1337 
1338  if (get_bits_left(gb) < 8)
1339  return AVERROR_INVALIDDATA;
1340  /* fetch the AC table indexes */
1341  ac_y_table = get_bits(gb, 4);
1342  ac_c_table = get_bits(gb, 4);
1343 
1344  /* build tables of AC VLC tables */
1345  for (i = 1; i <= 5; i++) {
1346  /* AC VLC table group 1 */
1347  y_tables[i] = &s->coeff_vlc[ac_y_table + 16];
1348  c_tables[i] = &s->coeff_vlc[ac_c_table + 16];
1349  }
1350  for (i = 6; i <= 14; i++) {
1351  /* AC VLC table group 2 */
1352  y_tables[i] = &s->coeff_vlc[ac_y_table + 32];
1353  c_tables[i] = &s->coeff_vlc[ac_c_table + 32];
1354  }
1355  for (i = 15; i <= 27; i++) {
1356  /* AC VLC table group 3 */
1357  y_tables[i] = &s->coeff_vlc[ac_y_table + 48];
1358  c_tables[i] = &s->coeff_vlc[ac_c_table + 48];
1359  }
1360  for (i = 28; i <= 63; i++) {
1361  /* AC VLC table group 4 */
1362  y_tables[i] = &s->coeff_vlc[ac_y_table + 64];
1363  c_tables[i] = &s->coeff_vlc[ac_c_table + 64];
1364  }
1365 
1366  /* decode all AC coefficients */
1367  for (i = 1; i <= 63; i++) {
1368  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1369  0, residual_eob_run);
1370  if (residual_eob_run < 0)
1371  return residual_eob_run;
1372 
1373  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1374  1, residual_eob_run);
1375  if (residual_eob_run < 0)
1376  return residual_eob_run;
1377  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1378  2, residual_eob_run);
1379  if (residual_eob_run < 0)
1380  return residual_eob_run;
1381  }
1382 
1383  return 0;
1384 }
1385 
1386 #if CONFIG_VP4_DECODER
1387 /**
1388  * eob_tracker[] is used instead of TOKEN_EOB(value);
1389  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant() work
1390  *
1391  * @return < 0 on error
1392  */
1393 static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1394  VLC *vlc_tables[64],
1395  int plane, int eob_tracker[64], int fragment)
1396 {
1397  int token;
1398  int zero_run = 0;
1399  int16_t coeff = 0;
1400  int coeff_i = 0;
1401  int eob_run;
1402 
1403  while (!eob_tracker[coeff_i]) {
1404  if (get_bits_left(gb) < 1)
1405  return AVERROR_INVALIDDATA;
1406 
1407  token = get_vlc2(gb, vlc_tables[coeff_i]->table, 11, 3);
1408 
1409  /* use the token to get a zero run, a coefficient, and an eob run */
1410  if ((unsigned) token <= 6U) {
1411  eob_run = get_eob_run(gb, token);
1412  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1413  eob_tracker[coeff_i] = eob_run - 1;
1414  return 0;
1415  } else if (token >= 0) {
1416  zero_run = get_coeff(gb, token, &coeff);
1417 
1418  if (zero_run) {
1419  if (coeff_i + zero_run > 64) {
1420  av_log(s->avctx, AV_LOG_ERROR,
1421  "Invalid zero run of %d with %d coeffs left\n",
1422  zero_run, 64 - coeff_i);
1423  zero_run = 64 - coeff_i;
1424  }
1425  *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
1426  coeff_i += zero_run;
1427  } else {
1428  if (!coeff_i)
1429  s->all_fragments[fragment].dc = coeff;
1430 
1431  *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
1432  }
1433  coeff_i++;
1434  if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
1435  return 0; /* stop */
1436  } else {
1437  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1438  return -1;
1439  }
1440  }
1441  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1442  eob_tracker[coeff_i]--;
1443  return 0;
1444 }
1445 
1446 static void vp4_dc_predictor_reset(VP4Predictor *p)
1447 {
1448  p->dc = 0;
1449  p->type = VP4_DC_UNDEFINED;
1450 }
1451 
1452 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1453 {
1454  int i, j;
1455 
1456  for (i = 0; i < 4; i++)
1457  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1458 
1459  for (j = 1; j < 5; j++)
1460  for (i = 0; i < 4; i++)
1461  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1462 }
1463 
1464 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1465 {
1466  int i;
1467 
1468  for (i = 0; i < 4; i++)
1469  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1470 
1471  for (i = 1; i < 5; i++)
1472  dc_pred[i][0] = dc_pred[i][4];
1473 }
1474 
1475 /* note: dc_pred points to the current block */
1476 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1477 {
1478  int count = 0;
1479  int dc = 0;
1480 
1481  if (dc_pred[-6].type == type) {
1482  dc += dc_pred[-6].dc;
1483  count++;
1484  }
1485 
1486  if (dc_pred[6].type == type) {
1487  dc += dc_pred[6].dc;
1488  count++;
1489  }
1490 
1491  if (count != 2 && dc_pred[-1].type == type) {
1492  dc += dc_pred[-1].dc;
1493  count++;
1494  }
1495 
1496  if (count != 2 && dc_pred[1].type == type) {
1497  dc += dc_pred[1].dc;
1498  count++;
1499  }
1500 
1501  /* using division instead of shift to correctly handle negative values */
1502  return count == 2 ? dc / 2 : last_dc[type];
1503 }
1504 
1505 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1506 {
1507  int plane, i;
1508  int16_t *base = s->dct_tokens_base;
1509  for (plane = 0; plane < 3; plane++) {
1510  for (i = 0; i < 64; i++) {
1511  s->dct_tokens[plane][i] = base;
1512  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1513  }
1514  }
1515 }
1516 
1517 static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1518 {
1519  int i, j;
1520  int dc_y_table;
1521  int dc_c_table;
1522  int ac_y_table;
1523  int ac_c_table;
1524  VLC *tables[2][64];
1525  int plane, sb_y, sb_x;
1526  int eob_tracker[64];
1527  VP4Predictor dc_pred[6][6];
1528  int last_dc[NB_VP4_DC_TYPES];
1529 
1530  if (get_bits_left(gb) < 16)
1531  return AVERROR_INVALIDDATA;
1532 
1533  /* fetch the DC table indexes */
1534  dc_y_table = get_bits(gb, 4);
1535  dc_c_table = get_bits(gb, 4);
1536 
1537  ac_y_table = get_bits(gb, 4);
1538  ac_c_table = get_bits(gb, 4);
1539 
1540  /* build tables of DC/AC VLC tables */
1541 
1542  /* DC table group */
1543  tables[0][0] = &s->coeff_vlc[dc_y_table];
1544  tables[1][0] = &s->coeff_vlc[dc_c_table];
1545  for (i = 1; i <= 5; i++) {
1546  /* AC VLC table group 1 */
1547  tables[0][i] = &s->coeff_vlc[ac_y_table + 16];
1548  tables[1][i] = &s->coeff_vlc[ac_c_table + 16];
1549  }
1550  for (i = 6; i <= 14; i++) {
1551  /* AC VLC table group 2 */
1552  tables[0][i] = &s->coeff_vlc[ac_y_table + 32];
1553  tables[1][i] = &s->coeff_vlc[ac_c_table + 32];
1554  }
1555  for (i = 15; i <= 27; i++) {
1556  /* AC VLC table group 3 */
1557  tables[0][i] = &s->coeff_vlc[ac_y_table + 48];
1558  tables[1][i] = &s->coeff_vlc[ac_c_table + 48];
1559  }
1560  for (i = 28; i <= 63; i++) {
1561  /* AC VLC table group 4 */
1562  tables[0][i] = &s->coeff_vlc[ac_y_table + 64];
1563  tables[1][i] = &s->coeff_vlc[ac_c_table + 64];
1564  }
1565 
1566  vp4_set_tokens_base(s);
1567 
1568  memset(last_dc, 0, sizeof(last_dc));
1569 
1570  for (plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
1571  memset(eob_tracker, 0, sizeof(eob_tracker));
1572 
1573  /* initialise dc prediction */
1574  for (i = 0; i < s->fragment_width[!!plane]; i++)
1575  vp4_dc_predictor_reset(&s->dc_pred_row[i]);
1576 
1577  for (j = 0; j < 6; j++)
1578  for (i = 0; i < 6; i++)
1579  vp4_dc_predictor_reset(&dc_pred[j][i]);
1580 
1581  for (sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
1582  for (sb_x = 0; sb_x *4 < s->fragment_width[!!plane]; sb_x++) {
1583  vp4_dc_pred_before(s, dc_pred, sb_x);
1584  for (j = 0; j < 16; j++) {
1585  int hx = hilbert_offset[j][0];
1586  int hy = hilbert_offset[j][1];
1587  int x = 4 * sb_x + hx;
1588  int y = 4 * sb_y + hy;
1589  VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
1590  int fragment, dc_block_type;
1591 
1592  if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
1593  continue;
1594 
1595  fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;
1596 
1597  if (s->all_fragments[fragment].coding_method == MODE_COPY)
1598  continue;
1599 
1600  if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1601  return -1;
1602 
1603  dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];
1604 
1605  s->all_fragments[fragment].dc +=
1606  vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1607 
1608  this_dc_pred->type = dc_block_type,
1609  this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
1610  }
1611  vp4_dc_pred_after(s, dc_pred, sb_x);
1612  }
1613  }
1614  }
1615 
1616  vp4_set_tokens_base(s);
1617 
1618  return 0;
1619 }
1620 #endif
1621 
1622 /*
1623  * This function reverses the DC prediction for each coded fragment in
1624  * the frame. Much of this function is adapted directly from the original
1625  * VP3 source code.
1626  */
1627 #define COMPATIBLE_FRAME(x) \
1628  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1629 #define DC_COEFF(u) s->all_fragments[u].dc
1630 
1631 static void reverse_dc_prediction(Vp3DecodeContext *s,
1632  int first_fragment,
1633  int fragment_width,
1634  int fragment_height)
1635 {
1636 #define PUL 8
1637 #define PU 4
1638 #define PUR 2
1639 #define PL 1
1640 
1641  int x, y;
1642  int i = first_fragment;
1643 
1644  int predicted_dc;
1645 
1646  /* DC values for the left, up-left, up, and up-right fragments */
1647  int vl, vul, vu, vur;
1648 
1649  /* indexes for the left, up-left, up, and up-right fragments */
1650  int l, ul, u, ur;
1651 
1652  /*
1653  * The 4 fields mean:
1654  * 0: up-left multiplier
1655  * 1: up multiplier
1656  * 2: up-right multiplier
1657  * 3: left multiplier
1658  */
1659  static const int predictor_transform[16][4] = {
1660  { 0, 0, 0, 0 },
1661  { 0, 0, 0, 128 }, // PL
1662  { 0, 0, 128, 0 }, // PUR
1663  { 0, 0, 53, 75 }, // PUR|PL
1664  { 0, 128, 0, 0 }, // PU
1665  { 0, 64, 0, 64 }, // PU |PL
1666  { 0, 128, 0, 0 }, // PU |PUR
1667  { 0, 0, 53, 75 }, // PU |PUR|PL
1668  { 128, 0, 0, 0 }, // PUL
1669  { 0, 0, 0, 128 }, // PUL|PL
1670  { 64, 0, 64, 0 }, // PUL|PUR
1671  { 0, 0, 53, 75 }, // PUL|PUR|PL
1672  { 0, 128, 0, 0 }, // PUL|PU
1673  { -104, 116, 0, 116 }, // PUL|PU |PL
1674  { 24, 80, 24, 0 }, // PUL|PU |PUR
1675  { -104, 116, 0, 116 } // PUL|PU |PUR|PL
1676  };
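/* Example (illustrative): transform == (PU|PL) == 5 selects
 * { 0, 64, 0, 64 }, i.e. predicted_dc = (64 * vu + 64 * vl) / 128, the
 * average of the up and left neighbours; transforms 13 (PUL|PU|PL) and
 * 15 (PUL|PU|PUR|PL) additionally trigger the outranging check below. */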
1677 
1678  /* This table shows which types of blocks can use other blocks for
1679  * prediction. For example, INTRA is the only mode in this table to
1680  * have a frame number of 0. That means INTRA blocks can only predict
1681  * from other INTRA blocks. There are 2 golden frame coding types;
1682  * blocks encoded in these modes can only predict from other blocks
1683  * that were encoded with one of these 2 modes. */
1684  static const unsigned char compatible_frame[9] = {
1685  1, /* MODE_INTER_NO_MV */
1686  0, /* MODE_INTRA */
1687  1, /* MODE_INTER_PLUS_MV */
1688  1, /* MODE_INTER_LAST_MV */
1689  1, /* MODE_INTER_PRIOR_LAST */
1690  2, /* MODE_USING_GOLDEN */
1691  2, /* MODE_GOLDEN_MV */
1692  1, /* MODE_INTER_FOURMV */
1693  3 /* MODE_COPY */
1694  };
1695  int current_frame_type;
1696 
1697  /* there is a last DC predictor for each of the 3 frame types */
1698  short last_dc[3];
1699 
1700  int transform = 0;
1701 
1702  vul =
1703  vu =
1704  vur =
1705  vl = 0;
1706  last_dc[0] =
1707  last_dc[1] =
1708  last_dc[2] = 0;
1709 
1710  /* for each fragment row... */
1711  for (y = 0; y < fragment_height; y++) {
1712  /* for each fragment in a row... */
1713  for (x = 0; x < fragment_width; x++, i++) {
1714 
1715  /* reverse prediction if this block was coded */
1716  if (s->all_fragments[i].coding_method != MODE_COPY) {
1717  current_frame_type =
1718  compatible_frame[s->all_fragments[i].coding_method];
1719 
1720  transform = 0;
1721  if (x) {
1722  l = i - 1;
1723  vl = DC_COEFF(l);
1724  if (COMPATIBLE_FRAME(l))
1725  transform |= PL;
1726  }
1727  if (y) {
1728  u = i - fragment_width;
1729  vu = DC_COEFF(u);
1730  if (COMPATIBLE_FRAME(u))
1731  transform |= PU;
1732  if (x) {
1733  ul = i - fragment_width - 1;
1734  vul = DC_COEFF(ul);
1735  if (COMPATIBLE_FRAME(ul))
1736  transform |= PUL;
1737  }
1738  if (x + 1 < fragment_width) {
1739  ur = i - fragment_width + 1;
1740  vur = DC_COEFF(ur);
1741  if (COMPATIBLE_FRAME(ur))
1742  transform |= PUR;
1743  }
1744  }
1745 
1746  if (transform == 0) {
1747  /* if there were no fragments to predict from, use last
1748  * DC saved */
1749  predicted_dc = last_dc[current_frame_type];
1750  } else {
1751  /* apply the appropriate predictor transform */
1752  predicted_dc =
1753  (predictor_transform[transform][0] * vul) +
1754  (predictor_transform[transform][1] * vu) +
1755  (predictor_transform[transform][2] * vur) +
1756  (predictor_transform[transform][3] * vl);
1757 
1758  predicted_dc /= 128;
1759 
1760  /* check for outranging on the [ul u l] and
1761  * [ul u ur l] predictors */
1762  if ((transform == 15) || (transform == 13)) {
1763  if (FFABS(predicted_dc - vu) > 128)
1764  predicted_dc = vu;
1765  else if (FFABS(predicted_dc - vl) > 128)
1766  predicted_dc = vl;
1767  else if (FFABS(predicted_dc - vul) > 128)
1768  predicted_dc = vul;
1769  }
1770  }
1771 
1772  /* at long last, apply the predictor */
1773  DC_COEFF(i) += predicted_dc;
1774  /* save the DC */
1775  last_dc[current_frame_type] = DC_COEFF(i);
1776  }
1777  }
1778  }
1779 }
1780 
1781 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1782  int ystart, int yend)
1783 {
1784  int x, y;
1785  int *bounding_values = s->bounding_values_array + 127;
1786 
1787  int width = s->fragment_width[!!plane];
1788  int height = s->fragment_height[!!plane];
1789  int fragment = s->fragment_start[plane] + ystart * width;
1790  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1791  uint8_t *plane_data = s->current_frame.f->data[plane];
1792  if (!s->flipped_image)
1793  stride = -stride;
1794  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1795 
1796  for (y = ystart; y < yend; y++) {
1797  for (x = 0; x < width; x++) {
1798  /* This code basically just deblocks on the edges of coded blocks.
1799  * However, it has to be much more complicated because of the
1800  * brain damaged deblock ordering used in VP3/Theora. Order matters
1801  * because some pixels get filtered twice. */
1802  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1803  /* do not perform left edge filter for left columns frags */
1804  if (x > 0) {
1805  s->vp3dsp.h_loop_filter(
1806  plane_data + 8 * x,
1807  stride, bounding_values);
1808  }
1809 
1810  /* do not perform top edge filter for top row fragments */
1811  if (y > 0) {
1812  s->vp3dsp.v_loop_filter(
1813  plane_data + 8 * x,
1814  stride, bounding_values);
1815  }
1816 
1817  /* do not perform right edge filter for right column
1818  * fragments or if right fragment neighbor is also coded
1819  * in this frame (it will be filtered in next iteration) */
1820  if ((x < width - 1) &&
1821  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1822  s->vp3dsp.h_loop_filter(
1823  plane_data + 8 * x + 8,
1824  stride, bounding_values);
1825  }
1826 
1827  /* do not perform bottom edge filter for bottom row
1828  * fragments or if bottom fragment neighbor is also coded
1829  * in this frame (it will be filtered in the next row) */
1830  if ((y < height - 1) &&
1831  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1832  s->vp3dsp.v_loop_filter(
1833  plane_data + 8 * x + 8 * stride,
1834  stride, bounding_values);
1835  }
1836  }
1837 
1838  fragment++;
1839  }
1840  plane_data += 8 * stride;
1841  }
1842 }
1843 
1844 /**
1845  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1846  * for the next block in coding order
1847  */
1848 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
1849  int plane, int inter, int16_t block[64])
1850 {
1851  int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1852  uint8_t *perm = s->idct_scantable;
1853  int i = 0;
1854 
1855  do {
1856  int token = *s->dct_tokens[plane][i];
1857  switch (token & 3) {
1858  case 0: // EOB
1859  if (--token < 4) // 0-3 are token types so the EOB run must now be 0
1860  s->dct_tokens[plane][i]++;
1861  else
1862  *s->dct_tokens[plane][i] = token & ~3;
1863  goto end;
1864  case 1: // zero run
1865  s->dct_tokens[plane][i]++;
1866  i += (token >> 2) & 0x7f;
1867  if (i > 63) {
1868  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1869  return i;
1870  }
1871  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1872  i++;
1873  break;
1874  case 2: // coeff
1875  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1876  s->dct_tokens[plane][i++]++;
1877  break;
1878  default: // shouldn't happen
1879  return i;
1880  }
1881  } while (i < 64);
1882  // return value is expected to be a valid level
1883  i--;
1884 end:
1885  // the actual DC+prediction is in the fragment structure
1886  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1887  return i;
1888 }
1889 
1890 /**
1891  * called when all pixels up to row y are complete
1892  */
1893 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1894 {
1895  int h, cy, i;
1896  int offset[AV_NUM_DATA_POINTERS];
1897 
1898  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1899  int y_flipped = s->flipped_image ? s->height - y : y;
1900 
1901  /* At the end of the frame, report INT_MAX instead of the height of
1902  * the frame. This makes the other threads' ff_thread_await_progress()
1903  * calls cheaper, because they don't have to clip their values. */
1904  ff_thread_report_progress(&s->current_frame,
1905  y_flipped == s->height ? INT_MAX
1906  : y_flipped - 1,
1907  0);
1908  }
1909 
1910  if (!s->avctx->draw_horiz_band)
1911  return;
1912 
1913  h = y - s->last_slice_end;
1914  s->last_slice_end = y;
1915  y -= h;
1916 
1917  if (!s->flipped_image)
1918  y = s->height - y - h;
1919 
1920  cy = y >> s->chroma_y_shift;
1921  offset[0] = s->current_frame.f->linesize[0] * y;
1922  offset[1] = s->current_frame.f->linesize[1] * cy;
1923  offset[2] = s->current_frame.f->linesize[2] * cy;
1924  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1925  offset[i] = 0;
1926 
1927  emms_c();
1928  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1929 }
1930 
1931 /**
1932  * Wait for the reference frame of the current fragment.
1933  * The progress value is in luma pixel rows.
1934  */
1935 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment,
1936  int motion_y, int y)
1937 {
1938  ThreadFrame *ref_frame;
1939  int ref_row;
1940  int border = motion_y & 1;
1941 
1942  if (fragment->coding_method == MODE_USING_GOLDEN ||
1943  fragment->coding_method == MODE_GOLDEN_MV)
1944  ref_frame = &s->golden_frame;
1945  else
1946  ref_frame = &s->last_frame;
1947 
1948  ref_row = y + (motion_y >> 1);
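      /* Block the decode until the reference frame has progressed past the
       * lowest row this fragment can read: the 8 rows of the block plus one
       * extra row when the vertical motion vector has a half-pel component;
       * FFABS keeps the wait sensible for negative ref_row values. */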
1949  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1950 
1951  ff_thread_await_progress(ref_frame, ref_row, 0);
1952 }
1953 
1954 #if CONFIG_VP4_DECODER
1955 /**
1956  * @return non-zero if temp (edge_emu_buffer) was populated
1957  */
1958 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1959  uint8_t * motion_source, int stride, int src_x, int src_y, uint8_t *temp)
1960 {
1961  int motion_shift = plane ? 4 : 2;
1962  int subpel_mask = plane ? 3 : 1;
1963  int *bounding_values = s->bounding_values_array + 127;
1964 
1965  int i;
1966  int x, y;
1967  int x2, y2;
1968  int x_subpel, y_subpel;
1969  int x_offset, y_offset;
1970 
1971  int block_width = plane ? 8 : 16;
1972  int plane_width = s->width >> (plane && s->chroma_x_shift);
1973  int plane_height = s->height >> (plane && s->chroma_y_shift);
1974 
1975 #define loop_stride 12
1976  uint8_t loop[12 * loop_stride];
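      /* 'loop' receives the prediction source plus a one-pixel border so the
       * VP4 loop filter can be run on it before motion compensation reads
       * from it; the filtered 9x9 region is copied into 'temp' at the end of
       * this function. */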
1977 
1978  /* using division instead of shift to correctly handle negative values */
1979  x = 8 * bx + motion_x / motion_shift;
1980  y = 8 * by + motion_y / motion_shift;
1981 
1982  x_subpel = motion_x & subpel_mask;
1983  y_subpel = motion_y & subpel_mask;
1984 
1985  if (x_subpel || y_subpel) {
1986  x--;
1987  y--;
1988 
1989  if (x_subpel)
1990  x = FFMIN(x, x + FFSIGN(motion_x));
1991 
1992  if (y_subpel)
1993  y = FFMIN(y, y + FFSIGN(motion_y));
1994 
1995  x2 = x + block_width;
1996  y2 = y + block_width;
1997 
1998  if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
1999  return 0;
2000 
2001  x_offset = (-(x + 2) & 7) + 2;
2002  y_offset = (-(y + 2) & 7) + 2;
2003 
2004  if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
2005  return 0;
2006 
2007  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2008  loop_stride, stride,
2009  12, 12, src_x - 1, src_y - 1,
2010  plane_width,
2011  plane_height);
2012 
2013  if (x_offset <= 8 + x_subpel)
2014  ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);
2015 
2016  if (y_offset <= 8 + y_subpel)
2017  ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);
2018 
2019  } else {
2020 
2021  x_offset = -x & 7;
2022  y_offset = -y & 7;
2023 
2024  if (!x_offset && !y_offset)
2025  return 0;
2026 
2027  s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
2028  loop_stride, stride,
2029  12, 12, src_x - 1, src_y - 1,
2030  plane_width,
2031  plane_height);
2032 
2033 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2034  if ((uintptr_t)(ptr) & 7) \
2035  s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2036  else \
2037  s->vp3dsp.name(ptr, stride, bounding_values);
2038 
2039  if (x_offset)
2040  safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2041 
2042  if (y_offset)
2043  safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
2044  }
2045 
2046  for (i = 0; i < 9; i++)
2047  memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);
2048 
2049  return 1;
2050 }
2051 #endif
2052 
2053 /*
2054  * Perform the final rendering for a particular slice of data.
2055  * The slice number ranges from 0..(c_superblock_height - 1).
2056  */
2057 static void render_slice(Vp3DecodeContext *s, int slice)
2058 {
2059  int x, y, i, j, fragment;
2060  int16_t *block = s->block;
2061  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2062  int motion_halfpel_index;
2063  uint8_t *motion_source;
2064  int plane, first_pixel;
2065 
2066  if (slice >= s->c_superblock_height)
2067  return;
2068 
2069  for (plane = 0; plane < 3; plane++) {
2070  uint8_t *output_plane = s->current_frame.f->data[plane] +
2071  s->data_offset[plane];
2072  uint8_t *last_plane = s->last_frame.f->data[plane] +
2073  s->data_offset[plane];
2074  uint8_t *golden_plane = s->golden_frame.f->data[plane] +
2075  s->data_offset[plane];
2076  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2077  int plane_width = s->width >> (plane && s->chroma_x_shift);
2078  int plane_height = s->height >> (plane && s->chroma_y_shift);
2079  int8_t(*motion_val)[2] = s->motion_val[!!plane];
2080 
2081  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
2082  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2083  int slice_width = plane ? s->c_superblock_width
2084  : s->y_superblock_width;
2085 
2086  int fragment_width = s->fragment_width[!!plane];
2087  int fragment_height = s->fragment_height[!!plane];
2088  int fragment_start = s->fragment_start[plane];
2089 
2090  int do_await = !plane && HAVE_THREADS &&
2091  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2092 
2093  if (!s->flipped_image)
2094  stride = -stride;
2095  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2096  continue;
2097 
2098  /* for each superblock row in the slice (both of them)... */
2099  for (; sb_y < slice_height; sb_y++) {
2100  /* for each superblock in a row... */
2101  for (sb_x = 0; sb_x < slice_width; sb_x++) {
2102  /* for each block in a superblock... */
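      /* (the 16 blocks are visited in the Hilbert-curve order encoded by
       * hilbert_offset[], matching the bitstream's coding order) */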
2103  for (j = 0; j < 16; j++) {
2104  x = 4 * sb_x + hilbert_offset[j][0];
2105  y = 4 * sb_y + hilbert_offset[j][1];
2106  fragment = y * fragment_width + x;
2107 
2108  i = fragment_start + fragment;
2109 
2110  // bounds check
2111  if (x >= fragment_width || y >= fragment_height)
2112  continue;
2113 
2114  first_pixel = 8 * y * stride + 8 * x;
2115 
2116  if (do_await &&
2117  s->all_fragments[i].coding_method != MODE_INTRA)
2118  await_reference_row(s, &s->all_fragments[i],
2119  motion_val[fragment][1],
2120  (16 * y) >> s->chroma_y_shift);
2121 
2122  /* transform if this block was coded */
2123  if (s->all_fragments[i].coding_method != MODE_COPY) {
2124  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2125  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2126  motion_source = golden_plane;
2127  else
2128  motion_source = last_plane;
2129 
2130  motion_source += first_pixel;
2131  motion_halfpel_index = 0;
2132 
2133  /* sort out the motion vector if this fragment is coded
2134  * using a motion vector method */
2135  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2136  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2137  int src_x, src_y;
2138  int standard_mc = 1;
2139  motion_x = motion_val[fragment][0];
2140  motion_y = motion_val[fragment][1];
2141 #if CONFIG_VP4_DECODER
2142  if (plane && s->version >= 2) {
2143  motion_x = (motion_x >> 1) | (motion_x & 1);
2144  motion_y = (motion_y >> 1) | (motion_y & 1);
2145  }
2146 #endif
2147 
2148  src_x = (motion_x >> 1) + 8 * x;
2149  src_y = (motion_y >> 1) + 8 * y;
2150 
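      /* motion_halfpel_index packs the sub-pel phase: bit 0 is the
       * horizontal half-pel flag, bit 1 the vertical one. Indices 0-2 map
       * onto the hpeldsp put_no_rnd_pixels variants; index 3 (diagonal) is
       * handled below with put_no_rnd_pixels_l2. */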
2151  motion_halfpel_index = motion_x & 0x01;
2152  motion_source += (motion_x >> 1);
2153 
2154  motion_halfpel_index |= (motion_y & 0x01) << 1;
2155  motion_source += ((motion_y >> 1) * stride);
2156 
2157 #if CONFIG_VP4_DECODER
2158  if (s->version >= 2) {
2159  uint8_t *temp = s->edge_emu_buffer;
2160  if (stride < 0)
2161  temp -= 8 * stride;
2162  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2163  motion_source = temp;
2164  standard_mc = 0;
2165  }
2166  }
2167 #endif
2168 
2169  if (standard_mc && (
2170  src_x < 0 || src_y < 0 ||
2171  src_x + 9 >= plane_width ||
2172  src_y + 9 >= plane_height)) {
2173  uint8_t *temp = s->edge_emu_buffer;
2174  if (stride < 0)
2175  temp -= 8 * stride;
2176 
2177  s->vdsp.emulated_edge_mc(temp, motion_source,
2178  stride, stride,
2179  9, 9, src_x, src_y,
2180  plane_width,
2181  plane_height);
2182  motion_source = temp;
2183  }
2184  }
2185 
2186  /* first, take care of copying a block from either the
2187  * previous or the golden frame */
2188  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2189  /* Note, it is possible to implement all MC cases
2190  * with put_no_rnd_pixels_l2 which would look more
2191  * like the VP3 source but this would be slower as
2192  * put_no_rnd_pixels_tab is better optimized */
2193  if (motion_halfpel_index != 3) {
2194  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2195  output_plane + first_pixel,
2196  motion_source, stride, 8);
2197  } else {
2198  /* d is 0 if motion_x and _y have the same sign,
2199  * else -1 */
2200  int d = (motion_x ^ motion_y) >> 31;
2201  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2202  motion_source - d,
2203  motion_source + stride + 1 + d,
2204  stride, 8);
2205  }
2206  }
2207 
2208  /* invert DCT and place (or add) in final output */
2209 
2210  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2211  vp3_dequant(s, s->all_fragments + i,
2212  plane, 0, block);
2213  s->vp3dsp.idct_put(output_plane + first_pixel,
2214  stride,
2215  block);
2216  } else {
2217  if (vp3_dequant(s, s->all_fragments + i,
2218  plane, 1, block)) {
2219  s->vp3dsp.idct_add(output_plane + first_pixel,
2220  stride,
2221  block);
2222  } else {
2223  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2224  stride, block);
2225  }
2226  }
2227  } else {
2228  /* copy directly from the previous frame */
2229  s->hdsp.put_pixels_tab[1][0](
2230  output_plane + first_pixel,
2231  last_plane + first_pixel,
2232  stride, 8);
2233  }
2234  }
2235  }
2236 
2237  // Filter up to the last row in the superblock row
2238  if (s->version < 2 && !s->skip_loop_filter)
2239  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2240  FFMIN(4 * sb_y + 3, fragment_height - 1));
2241  }
2242  }
2243 
2244  /* this looks like a good place for slice dispatch... */
2245  /* algorithm:
2246  * if (slice == s->macroblock_height - 1)
2247  * dispatch (both last slice & 2nd-to-last slice);
2248  * else if (slice > 0)
2249  * dispatch (slice - 1);
2250  */
2251 
2252  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2253  s->height - 16));
2254 }
2255 
2256 /// Allocate tables for per-frame data in Vp3DecodeContext
2257 static av_cold int allocate_tables(AVCodecContext *avctx)
2258 {
2259  Vp3DecodeContext *s = avctx->priv_data;
2260  int y_fragment_count, c_fragment_count;
2261 
2262  free_tables(avctx);
2263 
2264  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2265  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2266 
2267  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2268  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2269  s->all_fragments = av_mallocz_array(s->fragment_count, sizeof(Vp3Fragment));
2270 
2271  s-> kf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int));
2272  s->nkf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int));
2273  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2274 
2275  s->dct_tokens_base = av_mallocz_array(s->fragment_count,
2276  64 * sizeof(*s->dct_tokens_base));
2277  s->motion_val[0] = av_mallocz_array(y_fragment_count, sizeof(*s->motion_val[0]));
2278  s->motion_val[1] = av_mallocz_array(c_fragment_count, sizeof(*s->motion_val[1]));
2279 
2280  /* work out the block mapping tables */
2281  s->superblock_fragments = av_mallocz_array(s->superblock_count, 16 * sizeof(int));
2283 
2284  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2285 
2286  if (!s->superblock_coding || !s->all_fragments ||
2290  !s->dc_pred_row ||
2291  !s->motion_val[0] || !s->motion_val[1]) {
2292  return -1;
2293  }
2294 
2295  init_block_mapping(s);
2296 
2297  return 0;
2298 }
2299 
2300 static av_cold int init_frames(Vp3DecodeContext *s)
2301 {
2302  s->current_frame.f = av_frame_alloc();
2303  s->last_frame.f = av_frame_alloc();
2304  s->golden_frame.f = av_frame_alloc();
2305 
2306  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2307  return AVERROR(ENOMEM);
2308 
2309  return 0;
2310 }
2311 
2312 static av_cold int theora_init_huffman_tables(VLC *vlc, const HuffTable *huff)
2313 {
2314  uint32_t code = 0, codes[32];
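      /* read_huffman_tree() stored the leaves in depth-first order, so
       * handing out code values sequentially and stepping by
       * 0x80000000 >> len rebuilds each leaf's code (first subtree = 0,
       * second subtree = 1); the shift right-aligns it to 'len' bits for
       * ff_init_vlc_sparse(). */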
2315 
2316  for (unsigned i = 0; i < huff->nb_entries; i++) {
2317  codes[i] = code >> (31 - huff->entries[i].len);
2318  code += 0x80000000U >> huff->entries[i].len;
2319  }
2320  return ff_init_vlc_sparse(vlc, 11, huff->nb_entries,
2321  &huff->entries[0].len, sizeof(huff->entries[0]), 1,
2322  codes, 4, 4,
2323  &huff->entries[0].sym, sizeof(huff->entries[0]), 1, 0);
2324 }
2325 
2326 static av_cold int vp3_decode_init(AVCodecContext *avctx)
2327 {
2328  Vp3DecodeContext *s = avctx->priv_data;
2329  int i, inter, plane, ret;
2330  int c_width;
2331  int c_height;
2332  int y_fragment_count, c_fragment_count;
2333 #if CONFIG_VP4_DECODER
2334  int j;
2335 #endif
2336 
2337  ret = init_frames(s);
2338  if (ret < 0)
2339  return ret;
2340 
2341  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0'))
2342  s->version = 3;
2343  else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2344  s->version = 0;
2345  else
2346  s->version = 1;
2347 
2348  s->avctx = avctx;
2349  s->width = FFALIGN(avctx->coded_width, 16);
2350  s->height = FFALIGN(avctx->coded_height, 16);
2351  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2352  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2353  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
2354  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2355  ff_videodsp_init(&s->vdsp, 8);
2356  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2357 
2358  for (i = 0; i < 64; i++) {
2359 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2360  s->idct_permutation[i] = TRANSPOSE(i);
2361  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2362 #undef TRANSPOSE
2363  }
2364 
2365  /* initialize to an impossible value which will force a recalculation
2366  * in the first frame decode */
2367  for (i = 0; i < 3; i++)
2368  s->qps[i] = -1;
2369 
2369  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift,
2370  &s->chroma_y_shift);
2371  if (ret)
2372  return ret;
2373 
2374  s->y_superblock_width = (s->width + 31) / 32;
2375  s->y_superblock_height = (s->height + 31) / 32;
2376  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2377 
2378  /* work out the dimensions for the C planes */
2379  c_width = s->width >> s->chroma_x_shift;
2380  c_height = s->height >> s->chroma_y_shift;
2381  s->c_superblock_width = (c_width + 31) / 32;
2382  s->c_superblock_height = (c_height + 31) / 32;
2383  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2384 
2385  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2386  s->u_superblock_start = s->y_superblock_count;
2387  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2388 
2389  s->macroblock_width = (s->width + 15) / 16;
2390  s->macroblock_height = (s->height + 15) / 16;
2391  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2392  s->c_macroblock_width = (c_width + 15) / 16;
2393  s->c_macroblock_height = (c_height + 15) / 16;
2394  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2395  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2396 
2397  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2398  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2399  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2400  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2401 
2402  /* fragment count covers all 8x8 blocks for all 3 planes */
2403  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2404  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2405  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2406  s->fragment_start[1] = y_fragment_count;
2407  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2408 
2409  if (!s->theora_tables) {
2410  for (i = 0; i < 64; i++) {
2411  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2412  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2413  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2414  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2415  s->base_matrix[1][i] = s->version < 2 ? vp31_intra_c_dequant[i] : vp4_generic_dequant[i];
2416  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2417  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2418  }
2419 
2420  for (inter = 0; inter < 2; inter++) {
2421  for (plane = 0; plane < 3; plane++) {
2422  s->qr_count[inter][plane] = 1;
2423  s->qr_size[inter][plane][0] = 63;
2424  s->qr_base[inter][plane][0] =
2425  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2426  }
2427  }
2428 
2429  /* init VLC tables */
2430  if (s->version < 2) {
2431  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2432  if ((ret = init_vlc(&s->coeff_vlc[i], 11, 32,
2433  &vp3_bias[i][0][1], 4, 2,
2434  &vp3_bias[i][0][0], 4, 2, 0)) < 0)
2435  return ret;
2436  }
2437 #if CONFIG_VP4_DECODER
2438  } else { /* version >= 2 */
2439  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2440  if ((ret = init_vlc(&s->coeff_vlc[i], 11, 32,
2441  &vp4_bias[i][0][1], 4, 2,
2442  &vp4_bias[i][0][0], 4, 2, 0)) < 0)
2443  return ret;
2444  }
2445 #endif
2446  }
2447  } else {
2448  for (i = 0; i < FF_ARRAY_ELEMS(s->coeff_vlc); i++) {
2449  ret = theora_init_huffman_tables(&s->coeff_vlc[i], &s->huffman_table[i]);
2450  if (ret < 0)
2451  return ret;
2452  }
2453  }
2454 
2455  if ((ret = init_vlc(&s->superblock_run_length_vlc, 6, 34,
2456  &superblock_run_length_vlc_table[0][1], 4, 2,
2457  &superblock_run_length_vlc_table[0][0], 4, 2, 0)) < 0)
2458  return ret;
2459 
2460  if ((ret = init_vlc(&s->fragment_run_length_vlc, 5, 30,
2461  &fragment_run_length_vlc_table[0][1], 4, 2,
2462  &fragment_run_length_vlc_table[0][0], 4, 2, 0)) < 0)
2463  return ret;
2464 
2465  if ((ret = init_vlc(&s->mode_code_vlc, 3, 8,
2466  &mode_code_vlc_table[0][1], 2, 1,
2467  &mode_code_vlc_table[0][0], 2, 1, 0)) < 0)
2468  return ret;
2469 
2470  if ((ret = init_vlc(&s->motion_vector_vlc, 6, 63,
2471  &motion_vector_vlc_table[0][1], 2, 1,
2472  &motion_vector_vlc_table[0][0], 2, 1, 0)) < 0)
2473  return ret;
2474 
2475 #if CONFIG_VP4_DECODER
2476  for (j = 0; j < 2; j++)
2477  for (i = 0; i < 7; i++)
2478  if ((ret = init_vlc(&s->vp4_mv_vlc[j][i], 6, 63,
2479  &vp4_mv_vlc[j][i][0][1], 4, 2,
2480  &vp4_mv_vlc[j][i][0][0], 4, 2, 0)) < 0)
2481  return ret;
2482 
2483  /* version >= 2 */
2484  for (i = 0; i < 2; i++)
2485  if ((ret = init_vlc(&s->block_pattern_vlc[i], 3, 14,
2486  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2487  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0)) < 0)
2488  return ret;
2489 #endif
2490 
2491  return allocate_tables(avctx);
2492 }
2493 
2494 /// Release and shuffle frames after decode finishes
2495 static int update_frames(AVCodecContext *avctx)
2496 {
2497  Vp3DecodeContext *s = avctx->priv_data;
2498  int ret = 0;
2499 
2500  /* shuffle frames (last = current) */
2501  ff_thread_release_buffer(avctx, &s->last_frame);
2502  ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
2503  if (ret < 0)
2504  goto fail;
2505 
2506  if (s->keyframe) {
2507  ff_thread_release_buffer(avctx, &s->golden_frame);
2508  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
2509  }
2510 
2511 fail:
2512  ff_thread_release_buffer(avctx, &s->current_frame);
2513  return ret;
2514 }
2515 
2516 #if HAVE_THREADS
2517 static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
2518 {
2519  ff_thread_release_buffer(s->avctx, dst);
2520  if (src->f->data[0])
2521  return ff_thread_ref_frame(dst, src);
2522  return 0;
2523 }
2524 
2525 static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
2526 {
2527  int ret;
2528  if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
2529  (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
2530  (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
2531  return ret;
2532  return 0;
2533 }
2534 
2535 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2536 {
2537  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
2538  int qps_changed = 0, i, err;
2539 
2540  if (!s1->current_frame.f->data[0] ||
2541  s->width != s1->width || s->height != s1->height) {
2542  if (s != s1)
2543  ref_frames(s, s1);
2544  return -1;
2545  }
2546 
2547  if (s != s1) {
2548  // copy previous frame data
2549  if ((err = ref_frames(s, s1)) < 0)
2550  return err;
2551 
2552  s->keyframe = s1->keyframe;
2553 
2554  // copy qscale data if necessary
2555  for (i = 0; i < 3; i++) {
2556  if (s->qps[i] != s1->qps[i]) {
2557  qps_changed = 1;
2558  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2559  }
2560  }
2561 
2562  if (s->qps[0] != s1->qps[0])
2563  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2564  sizeof(s->bounding_values_array));
2565 
2566  if (qps_changed) {
2567  memcpy(s->qps, s1->qps, sizeof(s->qps));
2568  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2569  s->nqps = s1->nqps;
2570  }
2571  }
2572 
2573  return update_frames(dst);
2574 }
2575 #endif
2576 
2577 static int vp3_decode_frame(AVCodecContext *avctx,
2578  void *data, int *got_frame,
2579  AVPacket *avpkt)
2580 {
2581  AVFrame *frame = data;
2582  const uint8_t *buf = avpkt->data;
2583  int buf_size = avpkt->size;
2584  Vp3DecodeContext *s = avctx->priv_data;
2585  GetBitContext gb;
2586  int i, ret;
2587 
2588  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2589  return ret;
2590 
2591 #if CONFIG_THEORA_DECODER
2592  if (s->theora && get_bits1(&gb)) {
2593  int type = get_bits(&gb, 7);
2594  skip_bits_long(&gb, 6*8); /* "theora" */
2595 
2596  if (s->avctx->active_thread_type & FF_THREAD_FRAME) {
2597  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2598  return AVERROR_PATCHWELCOME;
2599  }
2600  if (type == 0) {
2601  vp3_decode_end(avctx);
2602  ret = theora_decode_header(avctx, &gb);
2603 
2604  if (ret >= 0)
2605  ret = vp3_decode_init(avctx);
2606  if (ret < 0) {
2607  vp3_decode_end(avctx);
2608  return ret;
2609  }
2610  return buf_size;
2611  } else if (type == 2) {
2612  vp3_decode_end(avctx);
2613  ret = theora_decode_tables(avctx, &gb);
2614  if (ret >= 0)
2615  ret = vp3_decode_init(avctx);
2616  if (ret < 0) {
2617  vp3_decode_end(avctx);
2618  return ret;
2619  }
2620  return buf_size;
2621  }
2622 
2623  av_log(avctx, AV_LOG_ERROR,
2624  "Header packet passed to frame decoder, skipping\n");
2625  return -1;
2626  }
2627 #endif
2628 
2629  s->keyframe = !get_bits1(&gb);
2630  if (!s->all_fragments) {
2631  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2632  return -1;
2633  }
2634  if (!s->theora)
2635  skip_bits(&gb, 1);
2636  for (i = 0; i < 3; i++)
2637  s->last_qps[i] = s->qps[i];
2638 
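      /* read one to three 6-bit quality indices; bitstreams from Theora 3.2
       * onwards may signal extra indices that coded blocks select between
       * later (see unpack_block_qpis()), and unused slots are set to -1 */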
2639  s->nqps = 0;
2640  do {
2641  s->qps[s->nqps++] = get_bits(&gb, 6);
2642  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2643  for (i = s->nqps; i < 3; i++)
2644  s->qps[i] = -1;
2645 
2646  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2647  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2648  s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);
2649 
2650  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2651  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2652  : AVDISCARD_NONKEY);
2653 
2654  if (s->qps[0] != s->last_qps[0])
2655  init_loop_filter(s);
2656 
2657  for (i = 0; i < s->nqps; i++)
2658  // reinit all dequantizers if the first one changed, because
2659  // the DC of the first quantizer must be used for all matrices
2660  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2661  init_dequantizer(s, i);
2662 
2663  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2664  return buf_size;
2665 
2666  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2667  : AV_PICTURE_TYPE_P;
2668  s->current_frame.f->key_frame = s->keyframe;
2669  if ((ret = ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF)) < 0)
2670  goto error;
2671 
2672  if (!s->edge_emu_buffer)
2673  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2674 
2675  if (s->keyframe) {
2676  if (!s->theora) {
2677  skip_bits(&gb, 4); /* width code */
2678  skip_bits(&gb, 4); /* height code */
2679  if (s->version) {
2680  s->version = get_bits(&gb, 5);
2681  if (avctx->frame_number == 0)
2682  av_log(s->avctx, AV_LOG_DEBUG,
2683  "VP version: %d\n", s->version);
2684  }
2685  }
2686  if (s->version || s->theora) {
2687  if (get_bits1(&gb))
2688  av_log(s->avctx, AV_LOG_ERROR,
2689  "Warning, unsupported keyframe coding type?!\n");
2690  skip_bits(&gb, 2); /* reserved? */
2691 
2692 #if CONFIG_VP4_DECODER
2693  if (s->version >= 2) {
2694  int mb_height, mb_width;
2695  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2696 
2697  mb_height = get_bits(&gb, 8);
2698  mb_width = get_bits(&gb, 8);
2699  if (mb_height != s->macroblock_height ||
2700  mb_width != s->macroblock_width)
2701  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2702 
2703  mb_width_mul = get_bits(&gb, 5);
2704  mb_width_div = get_bits(&gb, 3);
2705  mb_height_mul = get_bits(&gb, 5);
2706  mb_height_div = get_bits(&gb, 3);
2707  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2708  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multipler/divider");
2709 
2710  if (get_bits(&gb, 2))
2711  avpriv_request_sample(s->avctx, "unknown bits");
2712  }
2713 #endif
2714  }
2715  } else {
2716  if (!s->golden_frame.f->data[0]) {
2717  av_log(s->avctx, AV_LOG_WARNING,
2718  "vp3: first frame not a keyframe\n");
2719 
2720  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2721  if ((ret = ff_thread_get_buffer(avctx, &s->golden_frame,
2722  AV_GET_BUFFER_FLAG_REF)) < 0)
2723  goto error;
2724  ff_thread_release_buffer(avctx, &s->last_frame);
2725  if ((ret = ff_thread_ref_frame(&s->last_frame,
2726  &s->golden_frame)) < 0)
2727  goto error;
2728  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2729  }
2730  }
2731 
2732  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2733  ff_thread_finish_setup(avctx);
2734 
2735  if (s->version < 2) {
2736  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2737  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2738  goto error;
2739  }
2740 #if CONFIG_VP4_DECODER
2741  } else {
2742  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2743  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2744  goto error;
2745  }
2746 #endif
2747  }
2748  if ((ret = unpack_modes(s, &gb)) < 0) {
2749  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2750  goto error;
2751  }
2752  if (ret = unpack_vectors(s, &gb)) {
2753  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2754  goto error;
2755  }
2756  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2757  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2758  goto error;
2759  }
2760 
2761  if (s->version < 2) {
2762  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2763  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2764  goto error;
2765  }
2766 #if CONFIG_VP4_DECODER
2767  } else {
2768  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2769  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2770  goto error;
2771  }
2772 #endif
2773  }
2774 
2775  for (i = 0; i < 3; i++) {
2776  int height = s->height >> (i && s->chroma_y_shift);
2777  if (s->flipped_image)
2778  s->data_offset[i] = 0;
2779  else
2780  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2781  }
2782 
2783  s->last_slice_end = 0;
2784  for (i = 0; i < s->c_superblock_height; i++)
2785  render_slice(s, i);
2786 
2787  // filter the last row
2788  if (s->version < 2)
2789  for (i = 0; i < 3; i++) {
2790  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2791  apply_loop_filter(s, i, row, row + 1);
2792  }
2793  vp3_draw_horiz_band(s, s->height);
2794 
2795  /* output frame, offset as needed */
2796  if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
2797  return ret;
2798 
2799  frame->crop_left = s->offset_x;
2800  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2801  frame->crop_top = s->offset_y;
2802  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2803 
2804  *got_frame = 1;
2805 
2806  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
2807  ret = update_frames(avctx);
2808  if (ret < 0)
2809  return ret;
2810  }
2811 
2812  return buf_size;
2813 
2814 error:
2815  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2816 
2817  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2818  av_frame_unref(s->current_frame.f);
2819 
2820  return ret;
2821 }
2822 
2823 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2824  AVCodecContext *avctx)
2825 {
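      /* A '1' bit denotes a leaf carrying a 5-bit token; a '0' bit descends
       * one level and reads the two subtrees recursively, with 'length'
       * tracking the current code length. */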
2826  if (get_bits1(gb)) {
2827  int token;
2828  if (huff->nb_entries >= 32) { /* overflow */
2829  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2830  return -1;
2831  }
2832  token = get_bits(gb, 5);
2833  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2834  length, huff->nb_entries, token);
2835  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2836  } else {
2837  /* The following bound follows from the fact that nb_entries <= 32. */
2838  if (length >= 31) { /* overflow */
2839  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2840  return -1;
2841  }
2842  length++;
2843  if (read_huffman_tree(huff, gb, length, avctx))
2844  return -1;
2845  if (read_huffman_tree(huff, gb, length, avctx))
2846  return -1;
2847  }
2848  return 0;
2849 }
2850 
2851 #if CONFIG_THEORA_DECODER
2852 static const enum AVPixelFormat theora_pix_fmts[4] = {
2853  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2854 };
2855 
2856 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2857 {
2858  Vp3DecodeContext *s = avctx->priv_data;
2859  int visible_width, visible_height, colorspace;
2860  uint8_t offset_x = 0, offset_y = 0;
2861  int ret;
2862  AVRational fps, aspect;
2863 
2864  s->theora_header = 0;
2865  s->theora = get_bits(gb, 24);
2866  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2867  if (!s->theora) {
2868  s->theora = 1;
2869  avpriv_request_sample(s->avctx, "theora 0");
2870  }
2871 
2872  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2873  * but previous versions have the image flipped relative to vp3 */
2874  if (s->theora < 0x030200) {
2875  s->flipped_image = 1;
2876  av_log(avctx, AV_LOG_DEBUG,
2877  "Old (<alpha3) Theora bitstream, flipped image\n");
2878  }
2879 
2880  visible_width =
2881  s->width = get_bits(gb, 16) << 4;
2882  visible_height =
2883  s->height = get_bits(gb, 16) << 4;
2884 
2885  if (s->theora >= 0x030200) {
2886  visible_width = get_bits(gb, 24);
2887  visible_height = get_bits(gb, 24);
2888 
2889  offset_x = get_bits(gb, 8); /* offset x */
2890  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2891  }
2892 
2893  /* sanity check */
2894  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2895  visible_width + offset_x > s->width ||
2896  visible_height + offset_y > s->height) {
2897  av_log(avctx, AV_LOG_ERROR,
2898  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2899  visible_width, visible_height, offset_x, offset_y,
2900  s->width, s->height);
2901  return AVERROR_INVALIDDATA;
2902  }
2903 
2904  fps.num = get_bits_long(gb, 32);
2905  fps.den = get_bits_long(gb, 32);
2906  if (fps.num && fps.den) {
2907  if (fps.num < 0 || fps.den < 0) {
2908  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2909  return AVERROR_INVALIDDATA;
2910  }
2911  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2912  fps.den, fps.num, 1 << 30);
2913  }
2914 
2915  aspect.num = get_bits(gb, 24);
2916  aspect.den = get_bits(gb, 24);
2917  if (aspect.num && aspect.den) {
2918  av_reduce(&avctx->sample_aspect_ratio.num,
2919  &avctx->sample_aspect_ratio.den,
2920  aspect.num, aspect.den, 1 << 30);
2921  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2922  }
2923 
2924  if (s->theora < 0x030200)
2925  skip_bits(gb, 5); /* keyframe frequency force */
2926  colorspace = get_bits(gb, 8);
2927  skip_bits(gb, 24); /* bitrate */
2928 
2929  skip_bits(gb, 6); /* quality hint */
2930 
2931  if (s->theora >= 0x030200) {
2932  skip_bits(gb, 5); /* keyframe frequency force */
2933  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2934  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2935  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2936  return AVERROR_INVALIDDATA;
2937  }
2938  skip_bits(gb, 3); /* reserved */
2939  } else
2940  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2941 
2942  ret = ff_set_dimensions(avctx, s->width, s->height);
2943  if (ret < 0)
2944  return ret;
2945  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2946  avctx->width = visible_width;
2947  avctx->height = visible_height;
2948  // translate offsets from theora axis ([0,0] lower left)
2949  // to normal axis ([0,0] upper left)
2950  s->offset_x = offset_x;
2951  s->offset_y = s->height - visible_height - offset_y;
2952  }
2953 
2954  if (colorspace == 1)
2955  avctx->color_primaries = AVCOL_PRI_BT470M;
2956  else if (colorspace == 2)
2957  avctx->color_primaries = AVCOL_PRI_BT470BG;
2958 
2959  if (colorspace == 1 || colorspace == 2) {
2960  avctx->colorspace = AVCOL_SPC_BT470BG;
2961  avctx->color_trc = AVCOL_TRC_BT709;
2962  }
2963 
2964  s->theora_header = 1;
2965  return 0;
2966 }
2967 
2968 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2969 {
2970  Vp3DecodeContext *s = avctx->priv_data;
2971  int i, n, matrices, inter, plane, ret;
2972 
2973  if (!s->theora_header)
2974  return AVERROR_INVALIDDATA;
2975 
2976  if (s->theora >= 0x030200) {
2977  n = get_bits(gb, 3);
2978  /* loop filter limit values table */
2979  if (n)
2980  for (i = 0; i < 64; i++)
2981  s->filter_limit_values[i] = get_bits(gb, n);
2982  }
2983 
2984  if (s->theora >= 0x030200)
2985  n = get_bits(gb, 4) + 1;
2986  else
2987  n = 16;
2988  /* quality threshold table */
2989  for (i = 0; i < 64; i++)
2990  s->coded_ac_scale_factor[i] = get_bits(gb, n);
2991 
2992  if (s->theora >= 0x030200)
2993  n = get_bits(gb, 4) + 1;
2994  else
2995  n = 16;
2996  /* dc scale factor table */
2997  for (i = 0; i < 64; i++)
2998  s->coded_dc_scale_factor[0][i] =
2999  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3000 
3001  if (s->theora >= 0x030200)
3002  matrices = get_bits(gb, 9) + 1;
3003  else
3004  matrices = 3;
3005 
3006  if (matrices > 384) {
3007  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
3008  return -1;
3009  }
3010 
3011  for (n = 0; n < matrices; n++)
3012  for (i = 0; i < 64; i++)
3013  s->base_matrix[n][i] = get_bits(gb, 8);
3014 
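  /* For each (inter, plane) pair either reuse a previously decoded set of
   * quant ranges (newqr == 0) or read a fresh list of ranges that must cover
   * qi 0..63, each range carrying a base-matrix index and its length. */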
3015  for (inter = 0; inter <= 1; inter++) {
3016  for (plane = 0; plane <= 2; plane++) {
3017  int newqr = 1;
3018  if (inter || plane > 0)
3019  newqr = get_bits1(gb);
3020  if (!newqr) {
3021  int qtj, plj;
3022  if (inter && get_bits1(gb)) {
3023  qtj = 0;
3024  plj = plane;
3025  } else {
3026  qtj = (3 * inter + plane - 1) / 3;
3027  plj = (plane + 2) % 3;
3028  }
3029  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3030  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3031  sizeof(s->qr_size[0][0]));
3032  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3033  sizeof(s->qr_base[0][0]));
3034  } else {
3035  int qri = 0;
3036  int qi = 0;
3037 
3038  for (;;) {
3039  i = get_bits(gb, av_log2(matrices - 1) + 1);
3040  if (i >= matrices) {
3041  av_log(avctx, AV_LOG_ERROR,
3042  "invalid base matrix index\n");
3043  return -1;
3044  }
3045  s->qr_base[inter][plane][qri] = i;
3046  if (qi >= 63)
3047  break;
3048  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3049  s->qr_size[inter][plane][qri++] = i;
3050  qi += i;
3051  }
3052 
3053  if (qi > 63) {
3054  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3055  return -1;
3056  }
3057  s->qr_count[inter][plane] = qri;
3058  }
3059  }
3060  }
3061 
3062  /* Huffman tables */
3063  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3064  s->huffman_table[i].nb_entries = 0;
3065  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3066  return ret;
3067  }
3068 
3069  s->theora_tables = 1;
3070 
3071  return 0;
3072 }
3073 
3074 static av_cold int theora_decode_init(AVCodecContext *avctx)
3075 {
3076  Vp3DecodeContext *s = avctx->priv_data;
3077  GetBitContext gb;
3078  int ptype;
3079  const uint8_t *header_start[3];
3080  int header_len[3];
3081  int i;
3082  int ret;
3083 
3084  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3085 
3086  s->theora = 1;
3087 
3088  if (!avctx->extradata_size) {
3089  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3090  return -1;
3091  }
3092 
3093  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
3094  42, header_start, header_len) < 0) {
3095  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3096  return -1;
3097  }
3098 
3099  for (i = 0; i < 3; i++) {
3100  if (header_len[i] <= 0)
3101  continue;
3102  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3103  if (ret < 0)
3104  return ret;
3105 
3106  ptype = get_bits(&gb, 8);
3107 
3108  if (!(ptype & 0x80)) {
3109  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3110 // return -1;
3111  }
3112 
3113  // FIXME: Check for this as well.
3114  skip_bits_long(&gb, 6 * 8); /* "theora" */
3115 
3116  switch (ptype) {
3117  case 0x80:
3118  if (theora_decode_header(avctx, &gb) < 0)
3119  return -1;
3120  break;
3121  case 0x81:
3122 // FIXME: is this needed? it breaks sometimes
3123 // theora_decode_comments(avctx, gb);
3124  break;
3125  case 0x82:
3126  if (theora_decode_tables(avctx, &gb))
3127  return -1;
3128  break;
3129  default:
3130  av_log(avctx, AV_LOG_ERROR,
3131  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3132  break;
3133  }
3134  if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb))
3135  av_log(avctx, AV_LOG_WARNING,
3136  "%d bits left in packet %X\n",
3137  8 * header_len[i] - get_bits_count(&gb), ptype);
3138  if (s->theora < 0x030200)
3139  break;
3140  }
3141 
3142  return vp3_decode_init(avctx);
3143 }
3144 
3145 AVCodec ff_theora_decoder = {
3146  .name = "theora",
3147  .long_name = NULL_IF_CONFIG_SMALL("Theora"),
3148  .type = AVMEDIA_TYPE_VIDEO,
3149  .id = AV_CODEC_ID_THEORA,
3150  .priv_data_size = sizeof(Vp3DecodeContext),
3151  .init = theora_decode_init,
3152  .close = vp3_decode_end,
3157  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3160 };
3161 #endif
3162 
3163 AVCodec ff_vp3_decoder = {
3164  .name = "vp3",
3165  .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
3166  .type = AVMEDIA_TYPE_VIDEO,
3167  .id = AV_CODEC_ID_VP3,
3168  .priv_data_size = sizeof(Vp3DecodeContext),
3169  .init = vp3_decode_init,
3170  .close = vp3_decode_end,
3175  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3177 };
3178 
3179 #if CONFIG_VP4_DECODER
3180 AVCodec ff_vp4_decoder = {
3181  .name = "vp4",
3182  .long_name = NULL_IF_CONFIG_SMALL("On2 VP4"),
3183  .type = AVMEDIA_TYPE_VIDEO,
3184  .id = AV_CODEC_ID_VP4,
3185  .priv_data_size = sizeof(Vp3DecodeContext),
3186  .init = vp3_decode_init,
3187  .close = vp3_decode_end,
3192  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3194 };
3195 #endif
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
#define BLOCK_Y
Definition: vp3.c:639
AVCodec ff_vp4_decoder
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
int last_slice_end
Definition: vp3.c:184
uint8_t idct_scantable[64]
Definition: vp3.c:178
AVRational framerate
Definition: avcodec.h:2081
discard all frames except keyframes
Definition: avcodec.h:235
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AV_NUM_DATA_POINTERS
Definition: frame.h:315
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:282
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments, macroblocks <-> fragments, superblocks <-> macroblocks.
Definition: vp3.c:377
#define SB_NOT_CODED
Definition: vp3.c:60
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
#define TOKEN_EOB(eob_run)
Definition: vp3.c:250
HuffTable huffman_table[5 *16]
Definition: vp3.c:297
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2057
#define PUR
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int y_superblock_count
Definition: vp3.c:194
static void flush(AVCodecContext *avctx)
int bounding_values_array[256+2]
Definition: vp3.c:300
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:719
void(* put_no_rnd_pixels_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, ptrdiff_t stride, int h)
Copy 8xH pixels from source to destination buffer using a bilinear filter with no rounding (i...
Definition: vp3dsp.h:36
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1140
uint16_t qr_base[2][3][64]
Definition: vp3.c:229
AVFrame * f
Definition: thread.h:35
else temp
Definition: vf_mcdeint.c:256
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
VLC mode_code_vlc
Definition: vp3.c:276
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
int y_superblock_width
Definition: vp3.c:192
static const uint16_t fragment_run_length_vlc_table[30][2]
Definition: vp3data.h:119
HpelDSPContext hdsp
Definition: vp3.c:179
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
#define avpriv_request_sample(...)
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:275
#define MODE_INTER_PLUS_MV
Definition: vp3.c:71
int num
Numerator.
Definition: rational.h:59
int size
Definition: packet.h:364
static av_cold int init_frames(Vp3DecodeContext *s)
Definition: vp3.c:2300
int u_superblock_start
Definition: vp3.c:198
#define BLOCK_X
Definition: vp3.c:638
int av_log2(unsigned v)
Definition: intmath.c:26
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:910
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:783
static const uint8_t zero_run_base[32]
Definition: vp3data.h:207
void(* v_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
Definition: vp3dsp.h:44
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
uint8_t coding_method
Definition: vp3.c:56
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2326
GLint GLenum type
Definition: opengl_enc.c:104
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:222
int num_kf_coded_fragment[3]
Definition: vp3.c:267
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:463
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1631
discard all
Definition: avcodec.h:236
VLC motion_vector_vlc
Definition: vp3.c:277
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:337
static void error(const char *err)
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
int * superblock_fragments
Definition: vp3.c:288
VLC superblock_run_length_vlc
Definition: vp3.c:273
AVCodec.
Definition: codec.h:190
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:67
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, &#39;draw_horiz_band&#39; is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:766
Vp3Fragment * all_fragments
Definition: vp3.c:214
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:454
uint8_t base
Definition: vp3data.h:202
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1627
int dc
Definition: vp3.c:152
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
uint8_t offset_y
Definition: vp3.c:218
int y_superblock_height
Definition: vp3.c:193
#define TRANSPOSE(x)
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
uint8_t nb_entries
Definition: vp3.c:164
#define av_malloc(s)
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:896
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:251
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1617
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1848
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
static int FUNC() huffman_table(CodedBitstreamContext *ctx, RWContext *rw, JPEGRawHuffmanTable *current)
Multithreading support functions.
int macroblock_width
Definition: vp3.c:203
uint8_t idct_permutation[64]
Definition: vp3.c:177
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:411
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:632
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1025
uint8_t qpi
Definition: vp3.c:57
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
static av_cold int theora_init_huffman_tables(VLC *vlc, const HuffTable *huff)
Definition: vp3.c:2312
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:325
#define DC_COEFF(u)
Definition: vp3.c:1629
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
#define height
uint8_t * data
Definition: packet.h:363
uint8_t filter_limit_values[64]
Definition: vp3.c:299
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:121
#define ff_dlog(a,...)
bitstream reader API header.
static const uint8_t vp31_intra_c_dequant[64]
Definition: vp3data.h:42
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
static const uint8_t mode_code_vlc_table[8][2]
Definition: vp3data.h:144
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1173
#define FFALIGN(x, a)
Definition: macros.h:48
#define MODE_INTRA
Definition: vp3.c:70
#define av_log(a,...)
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1289
static const uint16_t table[]
Definition: prosumer.c:206
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:75
static void body(uint32_t ABCD[4], const uint8_t *src, int nblocks)
Definition: md5.c:101
int height
Definition: vp3.c:171
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:140
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2577
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:151
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:463
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:371
VP3DSPContext vp3dsp
Definition: vp3.c:181
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
int c_superblock_width
Definition: vp3.c:195
uint8_t qr_count[2][3]
Definition: vp3.c:227
int fragment_height[2]
Definition: vp3.c:212
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
uint8_t sym
Definition: vp3.c:159
#define CODING_MODE_COUNT
Definition: vp3.c:77
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
void(* idct_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
Definition: vp3dsp.h:42
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1809
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:189
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
void(* h_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
Definition: vp3dsp.h:45
AVCodec ff_theora_decoder
int theora
Definition: vp3.c:169
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:309
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
uint8_t bits
Definition: vp3data.h:202
int theora_header
Definition: vp3.c:169
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2823
GLsizei count
Definition: opengl_enc.c:108
#define FFMAX(a, b)
Definition: common.h:94
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:224
int qps[3]
Definition: vp3.c:187
#define fail()
Definition: checkasm.h:123
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:87
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:192
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:407
int chroma_y_shift
Definition: vp3.c:172
int flipped_image
Definition: vp3.c:183
static const struct @172 eob_run_table[7]
unsigned char * macroblock_coding
Definition: vp3.c:292
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
Half-pel DSP context.
Definition: hpeldsp.h:45
int fragment_width[2]
Definition: vp3.c:211
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
int type
Definition: vp3.c:153
#define SET_CHROMA_MODES
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
VLC block_pattern_vlc[2]
Definition: vp3.c:275
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1801
#define FFMIN(a, b)
Definition: common.h:96
VLC fragment_run_length_vlc
Definition: vp3.c:274
VLC vp4_mv_vlc[2][7]
Definition: vp3.c:278
#define PU
#define width
#define FFSIGN(a)
Definition: common.h:73
int macroblock_height
Definition: vp3.c:204
int width
picture width / height.
Definition: avcodec.h:704
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
#define SB_PARTIALLY_CODED
Definition: vp3.c:61
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1168
int yuv_macroblock_count
Definition: vp3.c:208
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:465
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
uint8_t * edge_emu_buffer
Definition: vp3.c:294
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1145
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:76
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
perm
Definition: f_perms.c:74
static const int8_t motion_vector_table[63]
Definition: vp3data.h:179
#define MODE_COPY
Definition: vp3.c:80
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use. ...
Definition: xiph.c:24
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:125
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
int macroblock_count
Definition: vp3.c:202
static const uint16_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:371
int c_superblock_height
Definition: vp3.c:196
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
int offset_x_warned
Definition: vp3.c:219
#define FF_ARRAY_ELEMS(a)
int total_num_coded_frags
Definition: vp3.c:259
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:1898
void(* idct_dc_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
Definition: vp3dsp.h:43
int c_superblock_count
Definition: vp3.c:197
if(ret)
AVCodec ff_vp3_decoder
Definition: vp3.c:3163
VP4 video decoder.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1781
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
also ITU-R BT1361
Definition: pixfmt.h:485
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
Half-pel DSP functions.
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int superblock_count
Definition: vp3.c:191
Used to store optimal huffman encoding results.
uint8_t len
Definition: magicyuv.c:49
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
Libavcodec external API header.
HuffEntry entries[32]
Definition: vp3.c:163
enum AVCodecID codec_id
Definition: avcodec.h:541
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:248
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
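
A short, self-contained sketch of the internal bitreader API referenced here and further down in this list (get_bits1(), skip_bits(), get_bits_long()); the buffer contents are arbitrary:

    #include "get_bits.h"

    static int read_example(const uint8_t *buf, int size)
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, buf, size);
        if (ret < 0)
            return ret;

        int flag  = get_bits1(&gb);         /* read a single bit       */
        skip_bits(&gb, 3);                  /* discard the next 3 bits */
        int value = get_bits_long(&gb, 17); /* read up to 32 bits      */

        return flag ? value : 0;
    }
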
int skip_loop_filter
Definition: vp3.c:185
int debug
debug
Definition: avcodec.h:1616
ThreadFrame current_frame
Definition: vp3.c:175
main external API structure.
Definition: avcodec.h:531
#define RSHIFT(a, b)
Definition: common.h:54
int last_qps[3]
Definition: vp3.c:189
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:556
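
The MKTAG() macro listed further below builds the same LSB-first packing, so a demuxer-supplied fourcc can be compared against a literal tag; the tag chosen here is only an example:

    #include "libavutil/common.h"

    static int is_vp31_tag(unsigned int codec_tag)
    {
        return codec_tag == MKTAG('V', 'P', '3', '1');
    }
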
uint8_t qr_size[2][3][64]
Definition: vp3.c:228
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define PUL
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2257
int data_offset[3]
Definition: vp3.c:216
int extradata_size
Definition: avcodec.h:633
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
int coded_height
Definition: avcodec.h:719
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
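
A scalar illustration of the two averaging rules quoted for put_pixels_tab and put_no_rnd_pixels_tab; the real op_pixels_func implementations apply these per pixel across whole blocks:

    #include <stdint.h>

    static inline uint8_t avg_rnd(uint8_t a, uint8_t b)
    {
        return (a + b + 1) >> 1;   /* halfway values round up   */
    }

    static inline uint8_t avg_no_rnd(uint8_t a, uint8_t b)
    {
        return (a + b) >> 1;       /* halfway values round down */
    }

For example, avg_rnd(10, 11) is 11 while avg_no_rnd(10, 11) is 10.
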
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
#define SB_FULLY_CODED
Definition: vp3.c:62
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1152
int * nkf_coded_fragment_list
Definition: vp3.c:266
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:258
int keyframe
Definition: vp3.c:176
#define TOKEN_COEFF(coeff)
Definition: vp3.c:252
static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:886
#define MODE_GOLDEN_MV
Definition: vp3.c:75
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:65
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
#define FRAGMENT_PIXELS
Definition: vp3.c:51
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2495
static const uint16_t superblock_run_length_vlc_table[34][2]
Definition: vp3data.h:98
#define MODE_USING_GOLDEN
Definition: vp3.c:74
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
#define MODE_INTER_FOURMV
Definition: vp3.c:76
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
int16_t block[64]
Definition: vp3.c:182
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
int v_superblock_start
Definition: vp3.c:199
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
int version
Definition: vp3.c:170
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
int * coded_fragment_list[3]
Definition: vp3.c:263
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:54
unsigned char * superblock_coding
Definition: vp3.c:200
VLC coeff_vlc[5 *16]
Definition: vp3.c:271
common internal api header.
ThreadFrame last_frame
Definition: vp3.c:174
int16_t * dct_tokens_base
Definition: vp3.c:249
AVCodecContext * avctx
Definition: vp3.c:168
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1132
VideoDSPContext vdsp
Definition: vp3.c:180
int c_macroblock_width
Definition: vp3.c:206
int den
Denominator.
Definition: rational.h:60
int c_macroblock_count
Definition: vp3.c:205
Core video DSP helper functions.
uint8_t base_matrix[384][64]
Definition: vp3.c:226
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:473
int fragment_count
Definition: vp3.c:210
void * priv_data
Definition: avcodec.h:558
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1090
int * kf_coded_fragment_list
Definition: vp3.c:265
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1935
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:392
int c_macroblock_height
Definition: vp3.c:207
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:618
static const uint16_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:445
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:73
#define MODE_INTER_NO_MV
Definition: vp3.c:69
VP4Predictor * dc_pred_row
Definition: vp3.c:302
int fragment_start[3]
Definition: vp3.c:215
int theora_tables
Definition: vp3.c:169
#define av_freep(p)
static const uint16_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:608
#define VLC_TYPE
Definition: vlc.h:24
#define MODE_INTER_LAST_MV
Definition: vp3.c:72
#define av_malloc_array(a, b)
ThreadFrame golden_frame
Definition: vp3.c:173
int chroma_x_shift
Definition: vp3.c:172
void(* idct_put)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
Definition: vp3dsp.h:41
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:445
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:87
#define MKTAG(a, b, c, d)
Definition: common.h:405
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:340
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1893
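
This is the point where, assuming the caller has set AVCodecContext.draw_horiz_band, completed rows can be handed out before the whole frame is decoded. A hedged application-side sketch (the callback body is illustrative):

    #include <libavcodec/avcodec.h>

    static void my_draw_horiz_band(AVCodecContext *c, const AVFrame *src,
                                   int offset[AV_NUM_DATA_POINTERS],
                                   int y, int type, int height)
    {
        /* rows [y, y + height) of src are final and may be displayed now */
    }

    /* after allocating the decoder context:
     *     avctx->draw_horiz_band = my_draw_horiz_band;              */
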
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:365
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
int16_t dc
Definition: vp3.c:55
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
uint8_t offset_x
Definition: vp3.c:217
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:225
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:214
Predicted.
Definition: avutil.h:275
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
#define PL
int8_t(*[2] motion_val)[2]
Definition: vp3.c:221