FFmpeg
rv34.c
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/thread.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "error_resilience.h"
36 #include "mpegutils.h"
37 #include "mpegvideo.h"
38 #include "mpegvideodec.h"
39 #include "golomb.h"
40 #include "mathops.h"
41 #include "mpeg_er.h"
42 #include "qpeldsp.h"
43 #include "rectangle.h"
44 #include "thread.h"
45 #include "threadframe.h"
46 
47 #include "rv34vlc.h"
48 #include "rv34data.h"
49 #include "rv34.h"
50 
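/* Clear two rows of two 4-byte motion vectors each, i.e. the motion vectors
 * of the four 8x8 blocks covering one macroblock. */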
51 static inline void ZERO8x2(void* dst, int stride)
52 {
53  fill_rectangle(dst, 1, 2, stride, 0, 4);
54  fill_rectangle(((uint8_t*)(dst))+4, 1, 2, stride, 0, 4);
55 }
56 
57 /** translation of RV30/40 macroblock types to lavc ones */
58 static const int rv34_mb_type_to_lavc[12] = {
71 };
72 
73 
74 static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
75
76 static int rv34_decode_mv(RV34DecContext *r, int block_type);
77 
78 /**
79  * @name RV30/40 VLC generating functions
80  * @{
81  */
82 
83 static VLCElem table_data[117592];
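/* All RV30/40 VLC tables are generated into this single static buffer;
 * the offset parameter of the functions below tracks the next free element. */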
84 
85 /**
86  * Generate VLC from codeword lengths.
87  * @param bits codeword lengths (zeroes are accepted)
88  * @param size length of input data
89  * @param vlc output VLC
90  * @param syms symbols for input codes (NULL for default ones)
91  * @param offset offset into the static table_data buffer, updated after initialization
92  */
93 static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
94  const uint8_t *syms, int *offset)
95 {
96  int counts[17] = {0}, codes[17];
97  uint16_t cw[MAX_VLC_SIZE];
98  int maxbits;
99 
100  for (int i = 0; i < size; i++)
101  counts[bits[i]]++;
102 
103  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
104  * So we reset it here. The code assigned to this element is 0x00. */
105  codes[0] = counts[0] = 0;
106  for (int i = 0; i < 16; i++) {
107  codes[i+1] = (codes[i] + counts[i]) << 1;
108  if (counts[i])
109  maxbits = i;
110  }
111  for (int i = 0; i < size; i++)
112  cw[i] = codes[bits[i]]++;
113 
114  vlc->table = &table_data[*offset];
115  vlc->table_allocated = FF_ARRAY_ELEMS(table_data) - *offset;
116  ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
117  bits, 1, 1,
118  cw, 2, 2,
119  syms, !!syms, !!syms, VLC_INIT_STATIC_OVERLONG);
120  *offset += vlc->table_size;
121 }
122 
123 static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
124  int *offset)
125 {
126  VLC vlc = { 0 };
127  rv34_gen_vlc_ext(bits, size, &vlc, NULL, offset);
128  *vlcp = vlc.table;
129 }
130 
131 /**
132  * Initialize all tables.
133  */
134 static av_cold void rv34_init_tables(void)
135 {
136  int i, j, k, offset = 0;
137 
138  for(i = 0; i < NUM_INTRA_TABLES; i++){
139  for(j = 0; j < 2; j++){
141  &intra_vlcs[i].cbppattern[j], &offset);
143  &intra_vlcs[i].second_pattern[j], &offset);
145  &intra_vlcs[i].third_pattern[j], &offset);
146  for(k = 0; k < 4; k++){
148  &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
149  }
150  }
151  for(j = 0; j < 4; j++){
153  &intra_vlcs[i].first_pattern[j], &offset);
154  }
156  &intra_vlcs[i].coefficient, &offset);
157  }
158 
159  for(i = 0; i < NUM_INTER_TABLES; i++){
161  &inter_vlcs[i].cbppattern[0], &offset);
162  for(j = 0; j < 4; j++){
164  &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
165  }
166  for(j = 0; j < 2; j++){
168  &inter_vlcs[i].first_pattern[j], &offset);
170  &inter_vlcs[i].second_pattern[j], &offset);
172  &inter_vlcs[i].third_pattern[j], &offset);
173  }
175  &inter_vlcs[i].coefficient, &offset);
176  }
177 }
178 
179 /** @} */ // vlc group
180 
181 /**
182  * @name RV30/40 4x4 block decoding functions
183  * @{
184  */
185 
186 /**
187  * Decode coded block pattern.
188  */
189 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
190 {
191  int pattern, code, cbp=0;
192  int ones;
193  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
194  static const int shifts[4] = { 0, 2, 8, 10 };
195  const int *curshift = shifts;
196  int i, t, mask;
197 
198  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
199  pattern = code & 0xF;
200  code >>= 4;
201 
202  ones = rv34_count_ones[pattern];
203 
204  for(mask = 8; mask; mask >>= 1, curshift++){
205  if(pattern & mask)
206  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
207  }
208 
209  for(i = 0; i < 4; i++){
210  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
211  if(t == 1)
212  cbp |= cbp_masks[get_bits1(gb)] << i;
213  if(t == 2)
214  cbp |= cbp_masks[2] << i;
215  }
216  return cbp;
217 }
218 
219 /**
220  * Get one coefficient value from the bitstream and store it.
221  */
222 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
223  const VLCElem *vlc, int q)
224 {
225  if(coef){
226  if(coef == esc){
227  coef = get_vlc2(gb, vlc, 9, 2);
228  if(coef > 23){
229  coef -= 23;
230  coef = 22 + ((1 << coef) | get_bits(gb, coef));
231  }
232  coef += esc;
233  }
234  if(get_bits1(gb))
235  coef = -coef;
236  *dst = (coef*q + 8) >> 4;
237  }
238 }
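/* Levels beyond the secondary VLC range use an escape: the decoded value minus 23
 * gives a bit count n, the level becomes 22 + ((1 << n) | n raw bits), and the
 * escape threshold is added back before the (coef*q + 8) >> 4 dequantization. */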
239 
240 /**
241  * Decode 2x2 subblock of coefficients.
242  */
243 static inline void decode_subblock(int16_t *dst, int code, const int is_block2,
244  GetBitContext *gb, const VLCElem *vlc, int q)
245 {
246  int flags = modulo_three_table[code];
247
248  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
249  if(is_block2){
250  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
251  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
252  }else{
253  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
254  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
255  }
256  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
257 }
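/* Note that is_block2 swaps the visiting order of the two off-diagonal coefficients;
 * see the comment about the bottom-left subblock in rv34_decode_block(). */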
258 
259 /**
260  * Decode a single coefficient.
261  */
262 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb,
263  const VLCElem *vlc, int q)
264 {
265  int coeff = modulo_three_table[code] >> 6;
266  decode_coeff(dst, coeff, 3, gb, vlc, q);
267 }
268 
269 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb,
270  const VLCElem *vlc,
271  int q_dc, int q_ac1, int q_ac2)
272 {
273  int flags = modulo_three_table[code];
274
275  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
276  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
277  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
278  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
279 }
280 
281 /**
282  * Decode coefficients for 4x4 block.
283  *
284  * This is done by filling 2x2 subblocks with decoded coefficients
285  * in this order (the same for subblocks and subblock coefficients):
286  *  o--o
287  *    /
288  *   /
289  *  o--o
290  */
291 
292 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
293  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
294 {
295  int code, pattern, has_ac = 1;
296 
297  code = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
298 
299  pattern = code & 0x7;
300 
301  code >>= 3;
302 
303  if (modulo_three_table[code] & 0x3F) {
304  decode_subblock3(dst, code, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
305  } else {
306  decode_subblock1(dst, code, gb, rvlc->coefficient, q_dc);
307  if (!pattern)
308  return 0;
309  has_ac = 0;
310  }
311 
312  if(pattern & 4){
313  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
314  decode_subblock(dst + 4*0+2, code, 0, gb, rvlc->coefficient, q_ac2);
315  }
316  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
317  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
318  decode_subblock(dst + 4*2+0, code, 1, gb, rvlc->coefficient, q_ac2);
319  }
320  if(pattern & 1){
321  code = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
322  decode_subblock(dst + 4*2+2, code, 0, gb, rvlc->coefficient, q_ac2);
323  }
324  return has_ac | pattern;
325 }
326 
327 /**
328  * @name RV30/40 bitstream parsing
329  * @{
330  */
331 
332 /**
333  * Decode starting slice position.
334  * @todo Maybe replace with ff_h263_decode_mba() ?
335  */
336 int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
337 {
338  int i;
339  for(i = 0; i < 5; i++)
340  if(rv34_mb_max_sizes[i] >= mb_size - 1)
341  break;
342  return rv34_mb_bits_sizes[i];
343 }
344 
345 /**
346  * Select VLC set for decoding from current quantizer, modifier and frame type.
347  */
348 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
349 {
350  if(mod == 2 && quant < 19) quant += 10;
351  else if(mod && quant < 26) quant += 5;
352  av_assert2(quant >= 0 && quant < 32);
353  return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][quant]]
354  : &intra_vlcs[rv34_quant_to_vlc_set[0][quant]];
355 }
356 
357 /**
358  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
359  */
360 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
361 {
362  MpegEncContext *s = &r->s;
363  GetBitContext *gb = &s->gb;
364  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
365  int t;
366 
367  r->is16 = get_bits1(gb);
368  if(r->is16){
369  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
370  r->block_type = RV34_MB_TYPE_INTRA16x16;
371  t = get_bits(gb, 2);
372  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
373  r->luma_vlc = 2;
374  }else{
375  if(!r->rv30){
376  if(!get_bits1(gb))
377  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
378  }
379  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
380  r->block_type = RV34_MB_TYPE_INTRA;
381  if(r->decode_intra_types(r, gb, intra_types) < 0)
382  return -1;
383  r->luma_vlc = 1;
384  }
385 
386  r->chroma_vlc = 0;
387  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
388 
389  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
390 }
391 
392 /**
393  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
394  */
395 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
396 {
397  MpegEncContext *s = &r->s;
398  GetBitContext *gb = &s->gb;
399  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
400  int i, t;
401 
402  r->block_type = r->decode_mb_info(r);
403  if(r->block_type == -1)
404  return -1;
405  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
406  r->mb_type[mb_pos] = r->block_type;
407  if(r->block_type == RV34_MB_SKIP){
408  if(s->pict_type == AV_PICTURE_TYPE_P)
409  r->mb_type[mb_pos] = RV34_MB_P_16x16;
410  if(s->pict_type == AV_PICTURE_TYPE_B)
411  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
412  }
413  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
414  if (rv34_decode_mv(r, r->block_type) < 0)
415  return -1;
416  if(r->block_type == RV34_MB_SKIP){
417  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
418  return 0;
419  }
420  r->chroma_vlc = 1;
421  r->luma_vlc = 0;
422 
423  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
424  if(r->is16){
425  t = get_bits(gb, 2);
426  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
427  r->luma_vlc = 2;
428  }else{
429  if(r->decode_intra_types(r, gb, intra_types) < 0)
430  return -1;
431  r->luma_vlc = 1;
432  }
433  r->chroma_vlc = 0;
434  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
435  }else{
436  for(i = 0; i < 16; i++)
437  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
438  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
439  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
440  r->is16 = 1;
441  r->chroma_vlc = 1;
442  r->luma_vlc = 2;
443  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
444  }
445  }
446 
447  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
448 }
449 
450 /** @} */ //bitstream functions
451 
452 /**
453  * @name motion vector related code (prediction, reconstruction, motion compensation)
454  * @{
455  */
456 
457 /** macroblock partition width in 8x8 blocks */
458 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
459 
460 /** macroblock partition height in 8x8 blocks */
461 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
462 
463 /** availability index for subblocks */
464 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
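/* The availability cache has a stride of 4; entries 6, 7, 10 and 11 correspond
 * to the four 8x8 subblocks of the current macroblock. */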
465 
466 /**
467  * motion vector prediction
468  *
469  * Motion prediction is performed for the block using the median of the
470  * motion vectors from the left, top and top-right blocks; in corner cases
471  * some other vectors may be used instead.
472  */
473 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
474 {
475  MpegEncContext *s = &r->s;
476  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
477  int A[2] = {0}, B[2], C[2];
478  int i, j;
479  int mx, my;
480  int* avail = r->avail_cache + avail_indexes[subblock_no];
481  int c_off = part_sizes_w[block_type];
482 
483  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
484  if(subblock_no == 3)
485  c_off = -1;
486 
487  if(avail[-1]){
488  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
489  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
490  }
491  if(avail[-4]){
492  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
493  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
494  }else{
495  B[0] = A[0];
496  B[1] = A[1];
497  }
498  if(!avail[c_off-4]){
499  if(avail[-4] && (avail[-1] || r->rv30)){
500  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
501  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
502  }else{
503  C[0] = A[0];
504  C[1] = A[1];
505  }
506  }else{
507  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
508  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
509  }
510  mx = mid_pred(A[0], B[0], C[0]);
511  my = mid_pred(A[1], B[1], C[1]);
512  mx += r->dmv[dmv_no][0];
513  my += r->dmv[dmv_no][1];
514  for(j = 0; j < part_sizes_h[block_type]; j++){
515  for(i = 0; i < part_sizes_w[block_type]; i++){
516  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
517  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
518  }
519  }
520 }
521 
522 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
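/* The macro above yields (a - b) reduced modulo 8192; RV30/40 PTS values are 13-bit. */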
523 
524 /**
525  * Calculate motion vector component that should be added for direct blocks.
526  */
527 static int calc_add_mv(RV34DecContext *r, int dir, int val)
528 {
529  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
530 
531  return (int)(val * (SUINT)mul + 0x2000) >> 14;
532 }
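/* mv_weight1/mv_weight2 are 14-bit fixed-point fractions of the reference distance
 * (set up in ff_rv34_decode_frame), hence the 0x2000 rounding and the shift by 14. */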
533 
534 /**
535  * Predict motion vector for B-frame macroblock.
536  */
537 static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
538  int A_avail, int B_avail, int C_avail,
539  int *mx, int *my)
540 {
541  if(A_avail + B_avail + C_avail != 3){
542  *mx = A[0] + B[0] + C[0];
543  *my = A[1] + B[1] + C[1];
544  if(A_avail + B_avail + C_avail == 2){
545  *mx /= 2;
546  *my /= 2;
547  }
548  }else{
549  *mx = mid_pred(A[0], B[0], C[0]);
550  *my = mid_pred(A[1], B[1], C[1]);
551  }
552 }
553 
554 /**
555  * motion vector prediction for B-frames
556  */
557 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
558 {
559  MpegEncContext *s = &r->s;
560  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
561  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
562  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
563  int has_A = 0, has_B = 0, has_C = 0;
564  int mx, my;
565  int i, j;
566  Picture *cur_pic = s->current_picture_ptr;
567  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
568  int type = cur_pic->mb_type[mb_pos];
569 
570  if((r->avail_cache[6-1] & type) & mask){
571  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
572  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
573  has_A = 1;
574  }
575  if((r->avail_cache[6-4] & type) & mask){
576  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
577  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
578  has_B = 1;
579  }
580  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
581  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
582  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
583  has_C = 1;
584  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
585  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
586  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
587  has_C = 1;
588  }
589 
590  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
591 
592  mx += r->dmv[dir][0];
593  my += r->dmv[dir][1];
594 
595  for(j = 0; j < 2; j++){
596  for(i = 0; i < 2; i++){
597  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
598  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
599  }
600  }
601  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
602  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
603  }
604 }
605 
606 /**
607  * motion vector prediction - RV3 version
608  */
609 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
610 {
611  MpegEncContext *s = &r->s;
612  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
613  int A[2] = {0}, B[2], C[2];
614  int i, j, k;
615  int mx, my;
616  int* avail = r->avail_cache + avail_indexes[0];
617 
618  if(avail[-1]){
619  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
620  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
621  }
622  if(avail[-4]){
623  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
624  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
625  }else{
626  B[0] = A[0];
627  B[1] = A[1];
628  }
629  if(!avail[-4 + 2]){
630  if(avail[-4] && (avail[-1])){
631  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
632  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
633  }else{
634  C[0] = A[0];
635  C[1] = A[1];
636  }
637  }else{
638  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
639  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
640  }
641  mx = mid_pred(A[0], B[0], C[0]);
642  my = mid_pred(A[1], B[1], C[1]);
643  mx += r->dmv[0][0];
644  my += r->dmv[0][1];
645  for(j = 0; j < 2; j++){
646  for(i = 0; i < 2; i++){
647  for(k = 0; k < 2; k++){
648  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
649  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
650  }
651  }
652  }
653 }
654 
655 static const int chroma_coeffs[3] = { 0, 3, 5 };
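/* Maps a third-pel chroma phase (0, 1, 2) to the nearest eighth-pel position
 * (0, 3/8, 5/8) expected by the H.264-style chroma MC functions. */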
656 
657 /**
658  * generic motion compensation function
659  *
660  * @param r decoder context
661  * @param block_type type of the current block
662  * @param xoff horizontal offset from the start of the current block
663  * @param yoff vertical offset from the start of the current block
664  * @param mv_off offset to the motion vector information
665  * @param width width of the current partition in 8x8 blocks
666  * @param height height of the current partition in 8x8 blocks
667  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
668  * @param thirdpel motion vectors are specified in 1/3 of pixel
669  * @param qpel_mc a set of functions used to perform luma motion compensation
670  * @param chroma_mc a set of functions used to perform chroma motion compensation
671  */
672 static inline void rv34_mc(RV34DecContext *r, const int block_type,
673  const int xoff, const int yoff, int mv_off,
674  const int width, const int height, int dir,
675  const int thirdpel, int weighted,
676  qpel_mc_func (*qpel_mc)[16],
677  h264_chroma_mc_func (*chroma_mc))
678 {
679  MpegEncContext *s = &r->s;
680  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
681  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
682  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
683  int is16x16 = 1;
684  int emu = 0;
685 
686  if(thirdpel){
687  int chroma_mx, chroma_my;
688  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
689  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
690  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
691  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
692  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
693  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
694  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
695  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
696  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
697  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
698  }else{
699  int cx, cy;
700  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
701  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
702  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
703  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
704  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
705  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
706  umx = cx >> 2;
707  umy = cy >> 2;
708  uvmx = (cx & 3) << 1;
709  uvmy = (cy & 3) << 1;
710  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
711  if(uvmx == 6 && uvmy == 6)
712  uvmx = uvmy = 4;
713  }
714 
715  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
716  /* wait for the referenced mb row to be finished */
717  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
718  const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
719  ff_thread_await_progress(f, mb_row, 0);
720  }
721 
722  dxy = ly*4 + lx;
723  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
724  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
725  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
726  src_x = s->mb_x * 16 + xoff + mx;
727  src_y = s->mb_y * 16 + yoff + my;
728  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
729  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
730  srcY += src_y * s->linesize + src_x;
731  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
732  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
733  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
734  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
735  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
736  srcY -= 2 + 2*s->linesize;
737  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
738  s->linesize, s->linesize,
739  (width << 3) + 6, (height << 3) + 6,
740  src_x - 2, src_y - 2,
741  s->h_edge_pos, s->v_edge_pos);
742  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
743  emu = 1;
744  }
745  if(!weighted){
746  Y = s->dest[0] + xoff + yoff *s->linesize;
747  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
748  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
749  }else{
750  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
751  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
752  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
753  }
754 
755  if(block_type == RV34_MB_P_16x8){
756  qpel_mc[1][dxy](Y, srcY, s->linesize);
757  Y += 8;
758  srcY += 8;
759  }else if(block_type == RV34_MB_P_8x16){
760  qpel_mc[1][dxy](Y, srcY, s->linesize);
761  Y += 8 * s->linesize;
762  srcY += 8 * s->linesize;
763  }
764  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
765  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
766  if (emu) {
767  uint8_t *uvbuf = s->sc.edge_emu_buffer;
768 
769  s->vdsp.emulated_edge_mc(uvbuf, srcU,
770  s->uvlinesize, s->uvlinesize,
771  (width << 2) + 1, (height << 2) + 1,
772  uvsrc_x, uvsrc_y,
773  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
774  srcU = uvbuf;
775  uvbuf += 9*s->uvlinesize;
776 
777  s->vdsp.emulated_edge_mc(uvbuf, srcV,
778  s->uvlinesize, s->uvlinesize,
779  (width << 2) + 1, (height << 2) + 1,
780  uvsrc_x, uvsrc_y,
781  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
782  srcV = uvbuf;
783  }
784  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
785  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
786 }
787 
788 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
789  const int xoff, const int yoff, int mv_off,
790  const int width, const int height, int dir)
791 {
792  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
793  r->rdsp.put_pixels_tab,
794  r->rdsp.put_chroma_pixels_tab);
795 }
796 
797 static void rv4_weight(RV34DecContext *r)
798 {
799  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
800  r->tmp_b_block_y[0],
801  r->tmp_b_block_y[1],
802  r->weight1,
803  r->weight2,
804  r->s.linesize);
805  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
806  r->tmp_b_block_uv[0],
807  r->tmp_b_block_uv[2],
808  r->weight1,
809  r->weight2,
810  r->s.uvlinesize);
811  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
812  r->tmp_b_block_uv[1],
813  r->tmp_b_block_uv[3],
814  r->weight1,
815  r->weight2,
816  r->s.uvlinesize);
817 }
818 
819 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
820 {
821  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
822 
823  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
824  r->rdsp.put_pixels_tab,
825  r->rdsp.put_chroma_pixels_tab);
826  if(!weighted){
827  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
828  r->rdsp.avg_pixels_tab,
829  r->rdsp.avg_chroma_pixels_tab);
830  }else{
831  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
832  r->rdsp.put_pixels_tab,
833  r->rdsp.put_chroma_pixels_tab);
834  rv4_weight(r);
835  }
836 }
837 
838 static void rv34_mc_2mv_skip(RV34DecContext *r)
839 {
840  int i, j;
841  int weighted = !r->rv30 && r->weight1 != 8192;
842 
843  for(j = 0; j < 2; j++)
844  for(i = 0; i < 2; i++){
845  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
846  weighted,
847  r->rdsp.put_pixels_tab,
848  r->rdsp.put_chroma_pixels_tab);
849  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
850  weighted,
851  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
852  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
853  }
854  if(weighted)
855  rv4_weight(r);
856 }
857 
858 /** number of motion vectors in each macroblock type */
859 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
860 
861 /**
862  * Decode motion vector differences
863  * and perform motion vector reconstruction and motion compensation.
864  */
865 static int rv34_decode_mv(RV34DecContext *r, int block_type)
866 {
867  MpegEncContext *s = &r->s;
868  GetBitContext *gb = &s->gb;
869  int i, j, k, l;
870  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
871  int next_bt;
872 
873  memset(r->dmv, 0, sizeof(r->dmv));
874  for(i = 0; i < num_mvs[block_type]; i++){
875  r->dmv[i][0] = get_interleaved_se_golomb(gb);
876  r->dmv[i][1] = get_interleaved_se_golomb(gb);
877  if (r->dmv[i][0] == INVALID_VLC ||
878  r->dmv[i][1] == INVALID_VLC) {
879  r->dmv[i][0] = r->dmv[i][1] = 0;
880  return AVERROR_INVALIDDATA;
881  }
882  }
883  switch(block_type){
884  case RV34_MB_TYPE_INTRA:
885  case RV34_MB_TYPE_INTRA16x16:
886  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
887  return 0;
888  case RV34_MB_SKIP:
889  if(s->pict_type == AV_PICTURE_TYPE_P){
890  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
891  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
892  break;
893  }
894  case RV34_MB_B_DIRECT:
895  //surprisingly, it uses the motion scheme from the next reference frame
896  /* wait for the current mb row to be finished */
897  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
898  ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
899 
900  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
901  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
902  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
903  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
904  }else
905  for(j = 0; j < 2; j++)
906  for(i = 0; i < 2; i++)
907  for(k = 0; k < 2; k++)
908  for(l = 0; l < 2; l++)
909  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
910  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
911  rv34_mc_2mv(r, block_type);
912  else
913  rv34_mc_2mv_skip(r);
914  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
915  break;
916  case RV34_MB_P_16x16:
917  case RV34_MB_P_MIX16x16:
918  rv34_pred_mv(r, block_type, 0, 0);
919  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
920  break;
921  case RV34_MB_B_FORWARD:
922  case RV34_MB_B_BACKWARD:
923  r->dmv[1][0] = r->dmv[0][0];
924  r->dmv[1][1] = r->dmv[0][1];
925  if(r->rv30)
926  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
927  else
928  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
929  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
930  break;
931  case RV34_MB_P_16x8:
932  case RV34_MB_P_8x16:
933  rv34_pred_mv(r, block_type, 0, 0);
934  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
935  if(block_type == RV34_MB_P_16x8){
936  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
937  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
938  }
939  if(block_type == RV34_MB_P_8x16){
940  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
941  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
942  }
943  break;
944  case RV34_MB_B_BIDIR:
945  rv34_pred_mv_b (r, block_type, 0);
946  rv34_pred_mv_b (r, block_type, 1);
947  rv34_mc_2mv (r, block_type);
948  break;
949  case RV34_MB_P_8x8:
950  for(i=0;i< 4;i++){
951  rv34_pred_mv(r, block_type, i, i);
952  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
953  }
954  break;
955  }
956 
957  return 0;
958 }
959 /** @} */ // mv group
960 
961 /**
962  * @name Macroblock reconstruction functions
963  * @{
964  */
965 /** mapping of RV30/40 intra prediction types to standard H.264 types */
966 static const int ittrans[9] = {
967  DC_PRED, VERT_PRED, HOR_PRED, DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_LEFT_PRED,
968  VERT_RIGHT_PRED, VERT_LEFT_PRED, HOR_UP_PRED, HOR_DOWN_PRED,
969 };
970 
971 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
972 static const int ittrans16[4] = {
973  DC_PRED8x8, VERT_PRED8x8, HOR_PRED8x8, PLANE_PRED8x8,
974 };
975 
976 /**
977  * Perform 4x4 intra prediction.
978  */
979 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
980 {
981  uint8_t *prev = dst - stride + 4;
982  uint32_t topleft;
983 
984  if(!up && !left)
985  itype = DC_128_PRED;
986  else if(!up){
987  if(itype == VERT_PRED) itype = HOR_PRED;
988  if(itype == DC_PRED) itype = LEFT_DC_PRED;
989  }else if(!left){
990  if(itype == HOR_PRED) itype = VERT_PRED;
991  if(itype == DC_PRED) itype = TOP_DC_PRED;
992  if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
993  }
994  if(!down){
995  if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
996  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
997  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
998  }
999  if(!right && up){
1000  topleft = dst[-stride + 3] * 0x01010101u;
1001  prev = (uint8_t*)&topleft;
1002  }
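/* prev now points at four copies of the rightmost top pixel, standing in for the
 * unavailable top-right samples. */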
1003  r->h.pred4x4[itype](dst, prev, stride);
1004 }
1005 
1006 static inline int adjust_pred16(int itype, int up, int left)
1007 {
1008  if(!up && !left)
1009  itype = DC_128_PRED8x8;
1010  else if(!up){
1011  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1012  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1013  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1014  }else if(!left){
1015  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1016  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1017  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1018  }
1019  return itype;
1020 }
1021 
1022 static void rv34_process_block(RV34DecContext *r,
1023  uint8_t *pdst, int stride,
1024  int fc, int sc, int q_dc, int q_ac)
1025 {
1026  MpegEncContext *s = &r->s;
1027  int16_t *ptr = s->block[0];
1028  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1029  fc, sc, q_dc, q_ac, q_ac);
1030  if(has_ac){
1031  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1032  }else{
1033  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1034  ptr[0] = 0;
1035  }
1036 }
1037 
1038 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1039 {
1040  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1041  MpegEncContext *s = &r->s;
1042  GetBitContext *gb = &s->gb;
1043  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1044  q_ac = rv34_qscale_tab[s->qscale];
1045  uint8_t *dst = s->dest[0];
1046  int16_t *ptr = s->block[0];
1047  int i, j, itype, has_ac;
1048 
1049  memset(block16, 0, 16 * sizeof(*block16));
1050 
1051  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1052  if(has_ac)
1053  r->rdsp.rv34_inv_transform(block16);
1054  else
1055  r->rdsp.rv34_inv_transform_dc(block16);
1056 
1057  itype = ittrans16[intra_types[0]];
1058  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1059  r->h.pred16x16[itype](dst, s->linesize);
1060 
1061  for(j = 0; j < 4; j++){
1062  for(i = 0; i < 4; i++, cbp >>= 1){
1063  int dc = block16[i + j*4];
1064 
1065  if(cbp & 1){
1066  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1067  }else
1068  has_ac = 0;
1069 
1070  if(has_ac){
1071  ptr[0] = dc;
1072  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1073  }else
1074  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1075  }
1076 
1077  dst += 4*s->linesize;
1078  }
1079 
1080  itype = ittrans16[intra_types[0]];
1081  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1082  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1083 
1084  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1085  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1086 
1087  for(j = 1; j < 3; j++){
1088  dst = s->dest[j];
1089  r->h.pred8x8[itype](dst, s->uvlinesize);
1090  for(i = 0; i < 4; i++, cbp >>= 1){
1091  uint8_t *pdst;
1092  if(!(cbp & 1)) continue;
1093  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1094 
1095  rv34_process_block(r, pdst, s->uvlinesize,
1096  r->chroma_vlc, 1, q_dc, q_ac);
1097  }
1098  }
1099 }
1100 
1101 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1102 {
1103  MpegEncContext *s = &r->s;
1104  uint8_t *dst = s->dest[0];
1105  int avail[6*8] = {0};
1106  int i, j, k;
1107  int idx, q_ac, q_dc;
1108 
1109  // Set neighbour information.
1110  if(r->avail_cache[1])
1111  avail[0] = 1;
1112  if(r->avail_cache[2])
1113  avail[1] = avail[2] = 1;
1114  if(r->avail_cache[3])
1115  avail[3] = avail[4] = 1;
1116  if(r->avail_cache[4])
1117  avail[5] = 1;
1118  if(r->avail_cache[5])
1119  avail[8] = avail[16] = 1;
1120  if(r->avail_cache[9])
1121  avail[24] = avail[32] = 1;
1122 
1123  q_ac = rv34_qscale_tab[s->qscale];
1124  for(j = 0; j < 4; j++){
1125  idx = 9 + j*8;
1126  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1127  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1128  avail[idx] = 1;
1129  if(!(cbp & 1)) continue;
1130 
1131  rv34_process_block(r, dst, s->linesize,
1132  r->luma_vlc, 0, q_ac, q_ac);
1133  }
1134  dst += s->linesize * 4 - 4*4;
1135  intra_types += r->intra_types_stride;
1136  }
1137 
1138  intra_types -= r->intra_types_stride * 4;
1139 
1140  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1141  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1142 
1143  for(k = 0; k < 2; k++){
1144  dst = s->dest[1+k];
1145  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1146 
1147  for(j = 0; j < 2; j++){
1148  int* acache = r->avail_cache + 6 + j*4;
1149  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1150  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1151  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1152  acache[0] = 1;
1153 
1154  if(!(cbp&1)) continue;
1155 
1156  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1157  r->chroma_vlc, 1, q_dc, q_ac);
1158  }
1159 
1160  dst += 4*s->uvlinesize;
1161  }
1162  }
1163 }
1164 
1165 static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
1166 {
1167  int d;
1168  d = motion_val[0][0] - motion_val[-step][0];
1169  if(d < -3 || d > 3)
1170  return 1;
1171  d = motion_val[0][1] - motion_val[-step][1];
1172  if(d < -3 || d > 3)
1173  return 1;
1174  return 0;
1175 }
1176 
1177 static int rv34_set_deblock_coef(RV34DecContext *r)
1178 {
1179  MpegEncContext *s = &r->s;
1180  int hmvmask = 0, vmvmask = 0, i, j;
1181  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1182  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1183  for(j = 0; j < 16; j += 8){
1184  for(i = 0; i < 2; i++){
1185  if(is_mv_diff_gt_3(motion_val + i, 1))
1186  vmvmask |= 0x11 << (j + i*2);
1187  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1188  hmvmask |= 0x03 << (j + i*2);
1189  }
1190  motion_val += s->b8_stride;
1191  }
1192  if(s->first_slice_line)
1193  hmvmask &= ~0x000F;
1194  if(!s->mb_x)
1195  vmvmask &= ~0x1111;
1196  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1197  vmvmask |= (vmvmask & 0x4444) >> 1;
1198  hmvmask |= (hmvmask & 0x0F00) >> 4;
1199  if(s->mb_x)
1200  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1201  if(!s->first_slice_line)
1202  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1203  }
1204  return hmvmask | vmvmask;
1205 }
1206 
1207 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1208 {
1209  MpegEncContext *s = &r->s;
1210  GetBitContext *gb = &s->gb;
1211  uint8_t *dst = s->dest[0];
1212  int16_t *ptr = s->block[0];
1213  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1214  int cbp, cbp2;
1215  int q_dc, q_ac, has_ac;
1216  int i, j;
1217  int dist;
1218 
1219  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1220  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1221  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1222  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1223  if(s->mb_x && dist)
1224  r->avail_cache[5] =
1225  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1226  if(dist >= s->mb_width)
1227  r->avail_cache[2] =
1228  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1229  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1230  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1231  if(s->mb_x && dist > s->mb_width)
1232  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1233 
1234  s->qscale = r->si.quant;
1235  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1236  r->cbp_luma [mb_pos] = cbp;
1237  r->cbp_chroma[mb_pos] = cbp >> 16;
1238  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1239  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1240 
1241  if(cbp == -1)
1242  return -1;
1243 
1244  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1245  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1246  else rv34_output_intra(r, intra_types, cbp);
1247  return 0;
1248  }
1249 
1250  if(r->is16){
1251  // Only for RV34_MB_P_MIX16x16
1252  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1253  memset(block16, 0, 16 * sizeof(*block16));
1254  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1255  q_ac = rv34_qscale_tab[s->qscale];
1256  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1257  r->rdsp.rv34_inv_transform(block16);
1258  else
1259  r->rdsp.rv34_inv_transform_dc(block16);
1260 
1261  q_ac = rv34_qscale_tab[s->qscale];
1262 
1263  for(j = 0; j < 4; j++){
1264  for(i = 0; i < 4; i++, cbp >>= 1){
1265  int dc = block16[i + j*4];
1266 
1267  if(cbp & 1){
1268  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1269  }else
1270  has_ac = 0;
1271 
1272  if(has_ac){
1273  ptr[0] = dc;
1274  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1275  }else
1276  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1277  }
1278 
1279  dst += 4*s->linesize;
1280  }
1281 
1282  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1283  }else{
1284  q_ac = rv34_qscale_tab[s->qscale];
1285 
1286  for(j = 0; j < 4; j++){
1287  for(i = 0; i < 4; i++, cbp >>= 1){
1288  if(!(cbp & 1)) continue;
1289 
1290  rv34_process_block(r, dst + 4*i, s->linesize,
1291  r->luma_vlc, 0, q_ac, q_ac);
1292  }
1293  dst += 4*s->linesize;
1294  }
1295  }
1296 
1297  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1298  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1299 
1300  for(j = 1; j < 3; j++){
1301  dst = s->dest[j];
1302  for(i = 0; i < 4; i++, cbp >>= 1){
1303  uint8_t *pdst;
1304  if(!(cbp & 1)) continue;
1305  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1306 
1307  rv34_process_block(r, pdst, s->uvlinesize,
1308  r->chroma_vlc, 1, q_dc, q_ac);
1309  }
1310  }
1311 
1312  return 0;
1313 }
1314 
1315 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1316 {
1317  MpegEncContext *s = &r->s;
1318  int cbp, dist;
1319  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1320 
1321  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1322  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1323  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1324  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1325  if(s->mb_x && dist)
1326  r->avail_cache[5] =
1327  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1328  if(dist >= s->mb_width)
1329  r->avail_cache[2] =
1330  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1331  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1332  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1333  if(s->mb_x && dist > s->mb_width)
1334  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1335 
1336  s->qscale = r->si.quant;
1337  cbp = rv34_decode_intra_mb_header(r, intra_types);
1338  r->cbp_luma [mb_pos] = cbp;
1339  r->cbp_chroma[mb_pos] = cbp >> 16;
1340  r->deblock_coefs[mb_pos] = 0xFFFF;
1341  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1342 
1343  if(cbp == -1)
1344  return -1;
1345 
1346  if(r->is16){
1347  rv34_output_i16x16(r, intra_types, cbp);
1348  return 0;
1349  }
1350 
1351  rv34_output_intra(r, intra_types, cbp);
1352  return 0;
1353 }
1354 
1355 static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
1356 {
1357  int bits;
1358  if(s->mb_y >= s->mb_height)
1359  return 1;
1360  if(!s->mb_num_left)
1361  return 1;
1362  if(r->s.mb_skip_run > 1)
1363  return 0;
1364  bits = get_bits_left(&s->gb);
1365  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1366  return 1;
1367  return 0;
1368 }
1369 
1370 
1371 static av_cold void rv34_decoder_free(RV34DecContext *r)
1372 {
1373  av_freep(&r->intra_types_hist);
1374  r->intra_types = NULL;
1375  av_freep(&r->tmp_b_block_base);
1376  av_freep(&r->mb_type);
1377  av_freep(&r->cbp_luma);
1378  av_freep(&r->cbp_chroma);
1379  av_freep(&r->deblock_coefs);
1380 }
1381 
1382 
1383 static av_cold int rv34_decoder_alloc(RV34DecContext *r)
1384 {
1385  r->intra_types_stride = r->s.mb_width * 4 + 4;
1386 
1387  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1388  sizeof(*r->cbp_chroma));
1389  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1390  sizeof(*r->cbp_luma));
1391  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1392  sizeof(*r->deblock_coefs));
1393  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1394  sizeof(*r->intra_types_hist));
1395  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1396  sizeof(*r->mb_type));
1397 
1398  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1399  r->intra_types_hist && r->mb_type)) {
1400  r->s.context_reinit = 1;
1401  rv34_decoder_free(r);
1402  return AVERROR(ENOMEM);
1403  }
1404 
1405  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1406 
1407  return 0;
1408 }
1409 
1410 
1411 static int rv34_decoder_realloc(RV34DecContext *r)
1412 {
1413  rv34_decoder_free(r);
1414  return rv34_decoder_alloc(r);
1415 }
1416 
1417 
1418 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1419 {
1420  MpegEncContext *s = &r->s;
1421  GetBitContext *gb = &s->gb;
1422  int mb_pos, slice_type;
1423  int res;
1424 
1425  init_get_bits(&r->s.gb, buf, buf_size*8);
1426  res = r->parse_slice_header(r, gb, &r->si);
1427  if(res < 0){
1428  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1429  return -1;
1430  }
1431 
1432  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1433  if (slice_type != s->pict_type) {
1434  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1435  return AVERROR_INVALIDDATA;
1436  }
1437  if (s->width != r->si.width || s->height != r->si.height) {
1438  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1439  return AVERROR_INVALIDDATA;
1440  }
1441 
1442  r->si.end = end;
1443  s->qscale = r->si.quant;
1444  s->mb_num_left = r->si.end - r->si.start;
1445  r->s.mb_skip_run = 0;
1446 
1447  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1448  if(r->si.start != mb_pos){
1449  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1450  s->mb_x = r->si.start % s->mb_width;
1451  s->mb_y = r->si.start / s->mb_width;
1452  }
1453  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1454  s->first_slice_line = 1;
1455  s->resync_mb_x = s->mb_x;
1456  s->resync_mb_y = s->mb_y;
1457 
1458  ff_init_block_index(s);
1459  while(!check_slice_end(r, s)) {
1460  ff_update_block_index(s, 8, 0, 1);
1461 
1462  if(r->si.type)
1463  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1464  else
1465  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1466  if(res < 0){
1467  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1468  return -1;
1469  }
1470  if (++s->mb_x == s->mb_width) {
1471  s->mb_x = 0;
1472  s->mb_y++;
1473  ff_init_block_index(s);
1474
1475  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1476  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1477 
1478  if(r->loop_filter && s->mb_y >= 2)
1479  r->loop_filter(r, s->mb_y - 2);
1480 
1481  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1482  ff_thread_report_progress(&s->current_picture_ptr->tf,
1483  s->mb_y - 2, 0);
1484 
1485  }
1486  if(s->mb_x == s->resync_mb_x)
1487  s->first_slice_line=0;
1488  s->mb_num_left--;
1489  }
1490  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1491 
1492  return s->mb_y == s->mb_height;
1493 }
1494 
1495 /** @} */ // reconstruction group end
1496 
1497 /**
1498  * Initialize decoder.
1499  */
1500 av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
1501 {
1502  static AVOnce init_static_once = AV_ONCE_INIT;
1503  RV34DecContext *r = avctx->priv_data;
1504  MpegEncContext *s = &r->s;
1505  int ret;
1506 
1507  ff_mpv_decode_init(s, avctx);
1508  s->out_format = FMT_H263;
1509 
1510  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1511  avctx->has_b_frames = 1;
1512  s->low_delay = 0;
1513 
1514  if ((ret = ff_mpv_common_init(s)) < 0)
1515  return ret;
1516 
1517  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1518 
1519  if ((ret = rv34_decoder_alloc(r)) < 0) {
1520  ff_mpv_common_end(&r->s);
1521  return ret;
1522  }
1523 
1524  ff_thread_once(&init_static_once, rv34_init_tables);
1525 
1526  return 0;
1527 }
1528 
1529 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1530 {
1531  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1532  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1533  int err;
1534 
1535  if (dst == src || !s1->context_initialized)
1536  return 0;
1537 
1538  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
1539  s->height = s1->height;
1540  s->width = s1->width;
1541  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1542  return err;
1543  if ((err = rv34_decoder_realloc(r)) < 0)
1544  return err;
1545  }
1546 
1547  r->cur_pts = r1->cur_pts;
1548  r->last_pts = r1->last_pts;
1549  r->next_pts = r1->next_pts;
1550 
1551  memset(&r->si, 0, sizeof(r->si));
1552 
1553  // Do not call ff_mpeg_update_thread_context on a partially initialized
1554  // decoder context.
1555  if (!s1->context_initialized)
1556  return 0;
1557 
1558  return ff_mpeg_update_thread_context(dst, src);
1559 }
1560 
1561 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1562 {
1563  if (n < slice_count) {
1564  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1565  } else
1566  return buf_size;
1567 }
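/* Each slice has an 8-byte entry in the packet header; the word before the offset
 * is an endianness marker (1 means the offset is stored little-endian, otherwise
 * big-endian). */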
1568 
1569 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1570 {
1571  RV34DecContext *r = avctx->priv_data;
1572  MpegEncContext *s = &r->s;
1573  int got_picture = 0, ret;
1574 
1575  ff_er_frame_end(&s->er, NULL);
1576  ff_mpv_frame_end(s);
1577  s->mb_num_left = 0;
1578 
1579  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1580  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1581 
1582  if (s->pict_type == AV_PICTURE_TYPE_B) {
1583  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1584  return ret;
1585  ff_print_debug_info(s, s->current_picture_ptr, pict);
1586  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1587  got_picture = 1;
1588  } else if (s->last_picture_ptr) {
1589  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1590  return ret;
1591  ff_print_debug_info(s, s->last_picture_ptr, pict);
1592  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1593  got_picture = 1;
1594  }
1595 
1596  return got_picture;
1597 }
1598 
1599 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1600 {
1601  // attempt to keep aspect during typical resolution switches
1602  if (!sar.num)
1603  sar = (AVRational){1, 1};
1604 
1605  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1606  return sar;
1607 }
1608 
1609 int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
1610  int *got_picture_ptr, AVPacket *avpkt)
1611 {
1612  const uint8_t *buf = avpkt->data;
1613  int buf_size = avpkt->size;
1614  RV34DecContext *r = avctx->priv_data;
1615  MpegEncContext *s = &r->s;
1616  SliceInfo si;
1617  int i, ret;
1618  int slice_count;
1619  const uint8_t *slices_hdr = NULL;
1620  int last = 0;
1621  int faulty_b = 0;
1622  int offset;
1623 
1624  /* no supplementary picture */
1625  if (buf_size == 0) {
1626  /* special case for last picture */
1627  if (s->next_picture_ptr) {
1628  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1629  return ret;
1630  s->next_picture_ptr = NULL;
1631 
1632  *got_picture_ptr = 1;
1633  }
1634  return 0;
1635  }
1636 
1637  slice_count = (*buf++) + 1;
1638  slices_hdr = buf + 4;
1639  buf += 8 * slice_count;
1640  buf_size -= 1 + 8 * slice_count;
1641 
1642  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1643  //parse first slice header to check whether this frame can be decoded
1644  if(offset < 0 || offset > buf_size){
1645  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1646  return AVERROR_INVALIDDATA;
1647  }
1648  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1649  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1650  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1651  return AVERROR_INVALIDDATA;
1652  }
1653  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1654  si.type == AV_PICTURE_TYPE_B) {
1655  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1656  "reference data.\n");
1657  faulty_b = 1;
1658  }
1659  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1660  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1661  || avctx->skip_frame >= AVDISCARD_ALL)
1662  return avpkt->size;
1663 
1664  /* first slice */
1665  if (si.start == 0) {
1666  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1667  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1668  s->mb_num_left);
1669  if (!s->context_reinit)
1670  ff_er_frame_end(&s->er, NULL);
1671  ff_mpv_frame_end(s);
1672  }
1673 
1674  if (s->width != si.width || s->height != si.height || s->context_reinit) {
1675  int err;
1676 
1677  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1678  si.width, si.height);
1679 
1680  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1681  return AVERROR_INVALIDDATA;
1682 
1683  s->avctx->sample_aspect_ratio = update_sar(
1684  s->width, s->height, s->avctx->sample_aspect_ratio,
1685  si.width, si.height);
1686  s->width = si.width;
1687  s->height = si.height;
1688 
1689  err = ff_set_dimensions(s->avctx, s->width, s->height);
1690  if (err < 0)
1691  return err;
1692  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1693  return err;
1694  if ((err = rv34_decoder_realloc(r)) < 0)
1695  return err;
1696  }
1697  if (faulty_b)
1698  return AVERROR_INVALIDDATA;
1699  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1700  if (ff_mpv_frame_start(s, s->avctx) < 0)
1701  return -1;
1702  ff_mpeg_er_frame_start(s);
1703  if (!r->tmp_b_block_base) {
1704  int i;
1705 
1706  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1707  if (!r->tmp_b_block_base)
1708  return AVERROR(ENOMEM);
1709  for (i = 0; i < 2; i++)
1710  r->tmp_b_block_y[i] = r->tmp_b_block_base
1711  + i * 16 * s->linesize;
1712  for (i = 0; i < 4; i++)
1713  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1714  + (i >> 1) * 8 * s->uvlinesize
1715  + (i & 1) * 16;
1716  }
1717  r->cur_pts = si.pts;
1718  if (s->pict_type != AV_PICTURE_TYPE_B) {
1719  r->last_pts = r->next_pts;
1720  r->next_pts = r->cur_pts;
1721  } else {
1722  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1723  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1724  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1725 
1726  if(!refdist){
1727  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1728  r->scaled_weight = 0;
1729  }else{
1730  if (FFMAX(dist0, dist1) > refdist)
1731  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1732 
1733  r->mv_weight1 = (dist0 << 14) / refdist;
1734  r->mv_weight2 = (dist1 << 14) / refdist;
1735  if((r->mv_weight1|r->mv_weight2) & 511){
1736  r->weight1 = r->mv_weight1;
1737  r->weight2 = r->mv_weight2;
1738  r->scaled_weight = 0;
1739  }else{
1740  r->weight1 = r->mv_weight1 >> 9;
1741  r->weight2 = r->mv_weight2 >> 9;
1742  r->scaled_weight = 1;
1743  }
1744  }
1745  }
1746  s->mb_x = s->mb_y = 0;
1747  ff_thread_finish_setup(s->avctx);
1748  } else if (s->context_reinit) {
1749  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
1750  "reinitialize (start MB is %d).\n", si.start);
1751  return AVERROR_INVALIDDATA;
1752  } else if (HAVE_THREADS &&
1753  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1754  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1755  "multithreading mode (start MB is %d).\n", si.start);
1756  return AVERROR_INVALIDDATA;
1757  }
1758 
1759  for(i = 0; i < slice_count; i++){
1760  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1761  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1762  int size;
1763 
1764  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1765  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1766  break;
1767  }
1768  size = offset1 - offset;
1769 
1770  r->si.end = s->mb_width * s->mb_height;
1771  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1772 
1773  if(i+1 < slice_count){
1774  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1775  if (offset2 < offset1 || offset2 > buf_size) {
1776  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1777  break;
1778  }
1779  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1780  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1781  size = offset2 - offset;
1782  }else
1783  r->si.end = si.start;
1784  }
1785  av_assert0 (size >= 0 && size <= buf_size - offset);
1786  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1787  if(last)
1788  break;
1789  }
1790 
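 /* Editor's note (not part of the original rv34.c source): if the last
  * slice completed the picture, the final macroblock row is deblocked and
  * the frame is output. In frame-multithreading mode an incomplete frame
  * cannot be handed over piecemeal, so it is error-concealed, reported as
  * fully decoded, and the packet is rejected instead. */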
1791  if (s->current_picture_ptr) {
1792  if (last) {
1793  if(r->loop_filter)
1794  r->loop_filter(r, s->mb_height - 1);
1795 
1796  ret = finish_frame(avctx, pict);
1797  if (ret < 0)
1798  return ret;
1799  *got_picture_ptr = ret;
1800  } else if (HAVE_THREADS &&
1801  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1802  av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
1803  /* always mark the current frame as finished, frame-mt supports
1804  * only complete frames */
1805  ff_er_frame_end(&s->er, NULL);
1806  ff_mpv_frame_end(s);
1807  s->mb_num_left = 0;
1808  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1809  return AVERROR_INVALIDDATA;
1810  }
1811  }
1812 
1813  return avpkt->size;
1814 }
1815 
1816  av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
1817  {
1818  RV34DecContext *r = avctx->priv_data;
1819 
1820  ff_mpv_common_end(&r->s);
1821  rv34_decoder_free(r);
1822 
1823  return 0;
1824 }
RV34DecContext
decoder context
Definition: rv34.h:86
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:82
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:58
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1101
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:126
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1816
AVERROR
#define AVERROR(e)
Definition: error.h
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:505
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:609
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:972
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:859
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:655
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:336
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
step
Definition: rate_distortion.txt:58
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1529
AVPacket::data
uint8_t * data
Definition: packet.h:522
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:205
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1411
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1355
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1177
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:45
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
ff_thread_await_progress
void ff_thread_await_progress(const ThreadFrame *f, int n, int field)
Definition: pthread_frame.c
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:464
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1006
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:49
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1383
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1819
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:473
GetBitContext
Definition: get_bits.h:108
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:68
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:292
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:52
val
static double val(void *priv, double ch)
Definition: aeval.c:78
type
Definition: writing_filters.txt:86
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:498
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
Definition: writing_filters.txt:58
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:90
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1500
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:979
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1315
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:51
mask
static const uint16_t mask[17]
Definition: lzw.c:38
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:65
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:892
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:788
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1207
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:74
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:80
s1
#define s1
Definition: regdef.h:38
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:67
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:47
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:527
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:74
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
IS_INTRA
#define IS_INTRA(x, y)
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1569
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:222
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:49
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:69
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:66
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:522
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1418
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:134
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:51
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
decode_subblock
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:243
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:70
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:50
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:76
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
decode_subblock3
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:269
V
#define V
Definition: avdct.c:30
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
qpeldsp.h
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:93
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:865
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:48
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
VLC::table_allocated
int table_allocated
Definition: vlc.h:39
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:838
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
f
f
Definition: af_crystalizer.c:121
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1165
AVPacket::size
int size
Definition: packet.h:523
dc
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:50
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1609
rectangle.h
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:594
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1599
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:54
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
decode_subblock1
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:262
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:189
AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:295
height
#define height
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:82
offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:69
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1038
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:46
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:557
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:81
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:461
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:819
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:57
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
table_data
static VLCElem table_data[117592]
Definition: rv34.c:83
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:75
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1561
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:750
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
VLC::bits
int bits
Definition: vlc.h:37
mid_pred
#define mid_pred
Definition: mathops.h:98
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:71
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:797
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
Definition: pthread_frame.c
left
Definition: snow.txt:386
AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:180
SliceInfo::start
int start
Definition: rv34.h:79
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
ThreadFrame
Definition: threadframe.h:27
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:395
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:458
VLC
Definition: vlc.h:36
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:966
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:829
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:490
VLC::table
VLCElem * table
Definition: vlc.h:38
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:360
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:81
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1022
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:56
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:39
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:672
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:38
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:53
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:74
mpeg_er.h
d
d
Definition: ffmpeg_filter.c:425
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:80
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:52
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion vector.
Definition: rv34.h:47
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:348
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:55
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int *offset)
Definition: rv34.c:123
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1371
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:537