FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 
31 #include "avcodec.h"
32 #include "error_resilience.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
35 #include "golomb.h"
36 #include "internal.h"
37 #include "mathops.h"
38 #include "mpeg_er.h"
39 #include "qpeldsp.h"
40 #include "rectangle.h"
41 #include "thread.h"
42 
43 #include "rv34vlc.h"
44 #include "rv34data.h"
45 #include "rv34.h"
46 
/**
 * Clear an 8-byte-wide, 2-row area by issuing two 4-byte-wide
 * fill_rectangle() calls with a zero fill value.
 */
static inline void ZERO8x2(void* dst, int stride)
{
    uint8_t *p = dst;

    fill_rectangle(p,     1, 2, stride, 0, 4);
    fill_rectangle(p + 4, 1, 2, stride, 0, 4);
}
52 
53 /** translation of RV30/40 macroblock types to lavc ones */
54 static const int rv34_mb_type_to_lavc[12] = {
67 };
68 
69 
71 
72 static int rv34_decode_mv(RV34DecContext *r, int block_type);
73 
74 /**
75  * @name RV30/40 VLC generating functions
76  * @{
77  */
78 
/* Start offsets (in VLC table entries) of each statically allocated VLC
 * inside table_data[]; table number n occupies the half-open slice
 * [table_offs[n], table_offs[n + 1]) -- see rv34_gen_vlc(). */
static const int table_offs[] = {
      0,   1818,   3622,   4144,   4698,   5234,   5804,   5868,   5900,   5932,
   5996,   6252,   6316,   6348,   6380,   7674,   8944,  10274,  11668,  12250,
  14060,  15846,  16372,  16962,  17512,  18148,  18180,  18212,  18244,  18308,
  18564,  18628,  18660,  18692,  20036,  21314,  22648,  23968,  24614,  26384,
  28190,  28736,  29366,  29938,  30608,  30640,  30672,  30704,  30768,  31024,
  31088,  31120,  31184,  32570,  33898,  35236,  36644,  37286,  39020,  40802,
  41368,  42052,  42692,  43348,  43380,  43412,  43444,  43476,  43604,  43668,
  43700,  43732,  45100,  46430,  47778,  49160,  49802,  51550,  53340,  53972,
  54648,  55348,  55994,  56122,  56154,  56186,  56218,  56346,  56410,  56442,
  56474,  57878,  59290,  60636,  62036,  62682,  64460,  64524,  64588,  64716,
  64844,  66076,  67466,  67978,  68542,  69064,  69648,  70296,  72010,  72074,
  72138,  72202,  72330,  73572,  74936,  75454,  76030,  76566,  77176,  77822,
  79582,  79646,  79678,  79742,  79870,  81180,  82536,  83064,  83672,  84242,
  84934,  85576,  87384,  87448,  87480,  87544,  87672,  88982,  90340,  90902,
  91598,  92182,  92846,  93488,  95246,  95278,  95310,  95374,  95502,  96878,
  98266,  98848,  99542, 100234, 100884, 101524, 103320, 103352, 103384, 103416,
 103480, 104874, 106222, 106910, 107584, 108258, 108902, 109544, 111366, 111398,
 111430, 111462, 111494, 112878, 114320, 114988, 115660, 116310, 116950, 117592
};
99 
100 static VLC_TYPE table_data[117592][2];
101 
102 /**
103  * Generate VLC from codeword lengths.
104  * @param bits codeword lengths (zeroes are accepted)
105  * @param size length of input data
106  * @param vlc output VLC
 107  * @param syms symbols for input codes (NULL for default ones)
108  * @param num VLC table number (for static initialization)
109  */
110 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms,
111  const int num)
112 {
113  int counts[17] = {0}, codes[17];
114  uint16_t cw[MAX_VLC_SIZE];
115  int maxbits;
116 
117  for (int i = 0; i < size; i++)
118  counts[bits[i]]++;
119 
120  /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
121  * So we reset it here. The code assigned to this element is 0x00. */
122  codes[0] = counts[0] = 0;
123  for (int i = 0; i < 16; i++) {
124  codes[i+1] = (codes[i] + counts[i]) << 1;
125  if (counts[i])
126  maxbits = i;
127  }
128  for (int i = 0; i < size; i++)
129  cw[i] = codes[bits[i]]++;
130 
131  vlc->table = &table_data[table_offs[num]];
132  vlc->table_allocated = table_offs[num + 1] - table_offs[num];
133  ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), size,
134  bits, 1, 1,
135  cw, 2, 2,
136  syms, !!syms, !!syms, INIT_VLC_USE_NEW_STATIC);
137 }
138 
139 /**
140  * Initialize all tables.
141  */
static av_cold void rv34_init_tables(void)
{
    int i, j, k;

    /* Static table numbering (indexes table_offs[] via rv34_gen_vlc):
     * each intra set i uses tables 19*i + 0..18,
     * each inter set i uses tables i*12 + 95..106. */
    for(i = 0; i < NUM_INTRA_TABLES; i++){
        for(j = 0; j < 2; j++){
            rv34_gen_vlc(rv34_table_intra_cbppat   [i][j], CBPPAT_VLC_SIZE,   &intra_vlcs[i].cbppattern[j],     NULL, 19*i + 0 + j);
            rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL, 19*i + 2 + j);
            rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j],  NULL, 19*i + 4 + j);
            for(k = 0; k < 4; k++){
                /* cbp tables use explicit symbols (rv34_cbp_code) */
                rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code, 19*i + 6 + j*4 + k);
            }
        }
        for(j = 0; j < 4; j++){
            rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL, 19*i + 14 + j);
        }
        rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL, 19*i + 18);
    }

    for(i = 0; i < NUM_INTER_TABLES; i++){
        rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL, i*12 + 95);
        for(j = 0; j < 4; j++){
            rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code, i*12 + 96 + j);
        }
        for(j = 0; j < 2; j++){
            rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j],  NULL, i*12 + 100 + j);
            rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL, i*12 + 102 + j);
            rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j],  NULL, i*12 + 104 + j);
        }
        rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL, i*12 + 106);
    }
}
174 
175 /** @} */ // vlc group
176 
177 /**
178  * @name RV30/40 4x4 block decoding functions
179  * @{
180  */
181 
182 /**
183  * Decode coded block pattern.
184  */
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
{
    int pattern, code, cbp=0;
    int ones;
    /* per-block chroma contributions: set bit 20, bit 16, or both */
    static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
    /* bit position of each of the four luma 8x8 quadrants inside cbp */
    static const int shifts[4] = { 0, 2, 8, 10 };
    const int *curshift = shifts;
    int i, t, mask;

    /* the pattern VLC packs a 4-bit luma presence pattern in the low bits
     * and a base-3 coded chroma descriptor in the high bits */
    code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
    pattern = code & 0xF;
    code >>= 4;

    ones = rv34_count_ones[pattern];

    /* read a sub-CBP for every luma quadrant flagged in the pattern;
     * the VLC used depends on how many quadrants are coded (ones) */
    for(mask = 8; mask; mask >>= 1, curshift++){
        if(pattern & mask)
            cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
    }

    /* chroma: each of the four blocks gets a ternary digit -
     * 0 = nothing, 1 = one of the two masks chosen by an extra bit,
     * 2 = both planes coded */
    for(i = 0; i < 4; i++){
        t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
        if(t == 1)
            cbp |= cbp_masks[get_bits1(gb)] << i;
        if(t == 2)
            cbp |= cbp_masks[2] << i;
    }
    return cbp;
}
214 
215 /**
216  * Get one coefficient value from the bitstream and store it.
217  */
218 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC* vlc, int q)
219 {
220  if(coef){
221  if(coef == esc){
222  coef = get_vlc2(gb, vlc->table, 9, 2);
223  if(coef > 23){
224  coef -= 23;
225  coef = 22 + ((1 << coef) | get_bits(gb, coef));
226  }
227  coef += esc;
228  }
229  if(get_bits1(gb))
230  coef = -coef;
231  *dst = (coef*q + 8) >> 4;
232  }
233 }
234 
235 /**
236  * Decode 2x2 subblock of coefficients.
237  */
238 static inline void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
239 {
241 
242  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
243  if(is_block2){
244  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
245  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
246  }else{
247  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
248  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
249  }
250  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
251 }
252 
253 /**
254  * Decode a single coefficient.
255  */
256 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
257 {
258  int coeff = modulo_three_table[code] >> 6;
259  decode_coeff(dst, coeff, 3, gb, vlc, q);
260 }
261 
262 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc,
263  int q_dc, int q_ac1, int q_ac2)
264 {
266 
267  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
268  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
269  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
270  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
271 }
272 
273 /**
274  * Decode coefficients for 4x4 block.
275  *
276  * This is done by filling 2x2 subblocks with decoded coefficients
277  * in this order (the same for subblocks and subblock coefficients):
278  * o--o
279  * /
280  * /
281  * o--o
282  */
283 
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
{
    int code, pattern, has_ac = 1;

    /* first VLC: a 3-bit presence pattern for the other three 2x2
     * subblocks (low bits) plus a base-3 coded description of the
     * top-left subblock (high bits) */
    code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);

    pattern = code & 0x7;

    code >>= 3;

    if (modulo_three_table[code] & 0x3F) {
        /* top-left subblock also has AC coefficients */
        decode_subblock3(dst, code, gb, &rvlc->coefficient, q_dc, q_ac1, q_ac2);
    } else {
        /* only the DC coefficient is present in the top-left subblock */
        decode_subblock1(dst, code, gb, &rvlc->coefficient, q_dc);
        if (!pattern)
            return 0;
        has_ac = 0;
    }

    /* remaining 2x2 subblocks: top-right, bottom-left, bottom-right */
    if(pattern & 4){
        code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
        decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
    }
    if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
        code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
        decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
    }
    if(pattern & 1){
        code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
        decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
    }
    /* nonzero iff the block contains any AC coefficients */
    return has_ac | pattern;
}
317 
318 /**
319  * @name RV30/40 bitstream parsing
320  * @{
321  */
322 
323 /**
324  * Decode starting slice position.
325  * @todo Maybe replace with ff_h263_decode_mba() ?
326  */
328 {
329  int i;
330  for(i = 0; i < 5; i++)
331  if(rv34_mb_max_sizes[i] >= mb_size - 1)
332  break;
333  return rv34_mb_bits_sizes[i];
334 }
335 
336 /**
337  * Select VLC set for decoding from current quantizer, modifier and frame type.
338  */
339 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
340 {
341  if(mod == 2 && quant < 19) quant += 10;
342  else if(mod && quant < 26) quant += 5;
343  av_assert2(quant >= 0 && quant < 32);
344  return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][quant]]
345  : &intra_vlcs[rv34_quant_to_vlc_set[0][quant]];
346 }
347 
348 /**
349  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
350  */
351 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
352 {
353  MpegEncContext *s = &r->s;
354  GetBitContext *gb = &s->gb;
355  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
356  int t;
357 
358  r->is16 = get_bits1(gb);
359  if(r->is16){
362  t = get_bits(gb, 2);
363  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
364  r->luma_vlc = 2;
365  }else{
366  if(!r->rv30){
367  if(!get_bits1(gb))
368  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
369  }
372  if(r->decode_intra_types(r, gb, intra_types) < 0)
373  return -1;
374  r->luma_vlc = 1;
375  }
376 
377  r->chroma_vlc = 0;
378  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
379 
380  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
381 }
382 
383 /**
384  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
385  */
386 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
387 {
388  MpegEncContext *s = &r->s;
389  GetBitContext *gb = &s->gb;
390  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
391  int i, t;
392 
393  r->block_type = r->decode_mb_info(r);
394  if(r->block_type == -1)
395  return -1;
397  r->mb_type[mb_pos] = r->block_type;
398  if(r->block_type == RV34_MB_SKIP){
399  if(s->pict_type == AV_PICTURE_TYPE_P)
400  r->mb_type[mb_pos] = RV34_MB_P_16x16;
401  if(s->pict_type == AV_PICTURE_TYPE_B)
402  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
403  }
404  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
405  if (rv34_decode_mv(r, r->block_type) < 0)
406  return -1;
407  if(r->block_type == RV34_MB_SKIP){
408  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
409  return 0;
410  }
411  r->chroma_vlc = 1;
412  r->luma_vlc = 0;
413 
414  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
415  if(r->is16){
416  t = get_bits(gb, 2);
417  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
418  r->luma_vlc = 2;
419  }else{
420  if(r->decode_intra_types(r, gb, intra_types) < 0)
421  return -1;
422  r->luma_vlc = 1;
423  }
424  r->chroma_vlc = 0;
425  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
426  }else{
427  for(i = 0; i < 16; i++)
428  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
429  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
430  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
431  r->is16 = 1;
432  r->chroma_vlc = 1;
433  r->luma_vlc = 2;
434  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
435  }
436  }
437 
438  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
439 }
440 
441 /** @} */ //bitstream functions
442 
443 /**
444  * @name motion vector related code (prediction, reconstruction, motion compensation)
445  * @{
446  */
447 
/** macroblock partition width in 8x8 blocks, indexed by RV34 MB type */
static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };

/** macroblock partition height in 8x8 blocks, indexed by RV34 MB type */
static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };

/** availability index for subblocks
 * (offsets into r->avail_cache for each of the four 8x8 subblocks;
 *  presumably a 4-wide cache layout, so entry[-1] is the left and
 *  entry[-4] the top neighbour -- matches their use in rv34_pred_mv) */
static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
456 
457 /**
458  * motion vector prediction
459  *
460  * Motion prediction performed for the block by using median prediction of
461  * motion vectors from the left, top and right top blocks but in corner cases
462  * some other vectors may be used instead.
463  */
464 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
465 {
466  MpegEncContext *s = &r->s;
467  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
468  int A[2] = {0}, B[2], C[2];
469  int i, j;
470  int mx, my;
471  int* avail = r->avail_cache + avail_indexes[subblock_no];
472  int c_off = part_sizes_w[block_type];
473 
474  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
475  if(subblock_no == 3)
476  c_off = -1;
477 
478  if(avail[-1]){
479  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
480  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
481  }
482  if(avail[-4]){
483  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
484  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
485  }else{
486  B[0] = A[0];
487  B[1] = A[1];
488  }
489  if(!avail[c_off-4]){
490  if(avail[-4] && (avail[-1] || r->rv30)){
491  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
492  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
493  }else{
494  C[0] = A[0];
495  C[1] = A[1];
496  }
497  }else{
498  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
499  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
500  }
501  mx = mid_pred(A[0], B[0], C[0]);
502  my = mid_pred(A[1], B[1], C[1]);
503  mx += r->dmv[dmv_no][0];
504  my += r->dmv[dmv_no][1];
505  for(j = 0; j < part_sizes_h[block_type]; j++){
506  for(i = 0; i < part_sizes_w[block_type]; i++){
507  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
508  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
509  }
510  }
511 }
512 
513 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
514 
515 /**
516  * Calculate motion vector component that should be added for direct blocks.
517  */
518 static int calc_add_mv(RV34DecContext *r, int dir, int val)
519 {
520  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
521 
522  return (int)(val * (SUINT)mul + 0x2000) >> 14;
523 }
524 
525 /**
526  * Predict motion vector for B-frame macroblock.
527  */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        /* all three neighbours known: component-wise median */
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        /* sum the candidates (callers zero-initialize unavailable ones),
         * averaging when exactly two are present */
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;
            *my /= 2;
        }
    }
}
544 
545 /**
546  * motion vector prediction for B-frames
547  */
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
    int has_A = 0, has_B = 0, has_C = 0;
    int mx, my;
    int i, j;
    Picture *cur_pic = s->current_picture_ptr;
    /* only neighbours predicted from the same reference list are usable */
    const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
    int type = cur_pic->mb_type[mb_pos];

    /* A = left neighbour (mv_pos - 1) */
    if((r->avail_cache[6-1] & type) & mask){
        A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
        A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
        has_A = 1;
    }
    /* B = top neighbour (mv_pos - b8_stride) */
    if((r->avail_cache[6-4] & type) & mask){
        B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
        B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
        has_B = 1;
    }
    /* C = top-right neighbour; at the right picture edge use top-left */
    if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
        has_C = 1;
    }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
        has_C = 1;
    }

    rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);

    /* add the decoded MV difference for this direction */
    mx += r->dmv[dir][0];
    my += r->dmv[dir][1];

    /* store the MV in all four 8x8 positions of the macroblock */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
    /* one-directional block types: clear the MVs of the unused list */
    if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
        ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
    }
}
596 
597 /**
598  * motion vector prediction - RV3 version
599  */
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
{
    /* NOTE: block_type and dir are unused here - RV30 always predicts the
     * whole macroblock from list 0 and writes the result to both lists */
    MpegEncContext *s = &r->s;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = {0}, B[2], C[2];   /* left, top, top-right candidate MVs */
    int i, j, k;
    int mx, my;
    int* avail = r->avail_cache + avail_indexes[0];

    if(avail[-1]){                /* left neighbour */
        A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
        A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
    }
    if(avail[-4]){                /* top neighbour */
        B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
        B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
    }else{
        B[0] = A[0];
        B[1] = A[1];
    }
    if(!avail[-4 + 2]){           /* top-right unavailable */
        if(avail[-4] && (avail[-1])){
            /* fall back to the top-left neighbour */
            C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
            C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
        }else{
            C[0] = A[0];
            C[1] = A[1];
        }
    }else{
        C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
        C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
    }
    /* median prediction plus the decoded MV difference */
    mx = mid_pred(A[0], B[0], C[0]);
    my = mid_pred(A[1], B[1], C[1]);
    mx += r->dmv[0][0];
    my += r->dmv[0][1];
    /* store the MV in all four 8x8 positions of both reference lists */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            for(k = 0; k < 2; k++){
                s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
                s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
            }
        }
    }
}
645 
646 static const int chroma_coeffs[3] = { 0, 3, 5 };
647 
648 /**
649  * generic motion compensation function
650  *
651  * @param r decoder context
652  * @param block_type type of the current block
653  * @param xoff horizontal offset from the start of the current block
654  * @param yoff vertical offset from the start of the current block
655  * @param mv_off offset to the motion vector information
656  * @param width width of the current partition in 8x8 blocks
657  * @param height height of the current partition in 8x8 blocks
658  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
659  * @param thirdpel motion vectors are specified in 1/3 of pixel
660  * @param qpel_mc a set of functions used to perform luma motion compensation
661  * @param chroma_mc a set of functions used to perform chroma motion compensation
662  */
663 static inline void rv34_mc(RV34DecContext *r, const int block_type,
664  const int xoff, const int yoff, int mv_off,
665  const int width, const int height, int dir,
666  const int thirdpel, int weighted,
667  qpel_mc_func (*qpel_mc)[16],
669 {
670  MpegEncContext *s = &r->s;
671  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
672  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
673  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
674  int is16x16 = 1;
675  int emu = 0;
676 
677  if(thirdpel){
678  int chroma_mx, chroma_my;
679  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
680  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
681  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
682  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
683  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
684  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
685  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
686  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
687  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
688  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
689  }else{
690  int cx, cy;
691  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
692  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
693  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
694  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
695  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
696  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
697  umx = cx >> 2;
698  umy = cy >> 2;
699  uvmx = (cx & 3) << 1;
700  uvmy = (cy & 3) << 1;
701  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
702  if(uvmx == 6 && uvmy == 6)
703  uvmx = uvmy = 4;
704  }
705 
706  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
707  /* wait for the referenced mb row to be finished */
708  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
709  ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
710  ff_thread_await_progress(f, mb_row, 0);
711  }
712 
713  dxy = ly*4 + lx;
714  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
715  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
716  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
717  src_x = s->mb_x * 16 + xoff + mx;
718  src_y = s->mb_y * 16 + yoff + my;
719  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
720  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
721  srcY += src_y * s->linesize + src_x;
722  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
723  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
724  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
725  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
726  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
727  srcY -= 2 + 2*s->linesize;
729  s->linesize, s->linesize,
730  (width << 3) + 6, (height << 3) + 6,
731  src_x - 2, src_y - 2,
732  s->h_edge_pos, s->v_edge_pos);
733  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
734  emu = 1;
735  }
736  if(!weighted){
737  Y = s->dest[0] + xoff + yoff *s->linesize;
738  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
739  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
740  }else{
741  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
742  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
743  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
744  }
745 
746  if(block_type == RV34_MB_P_16x8){
747  qpel_mc[1][dxy](Y, srcY, s->linesize);
748  Y += 8;
749  srcY += 8;
750  }else if(block_type == RV34_MB_P_8x16){
751  qpel_mc[1][dxy](Y, srcY, s->linesize);
752  Y += 8 * s->linesize;
753  srcY += 8 * s->linesize;
754  }
755  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
756  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
757  if (emu) {
758  uint8_t *uvbuf = s->sc.edge_emu_buffer;
759 
760  s->vdsp.emulated_edge_mc(uvbuf, srcU,
761  s->uvlinesize, s->uvlinesize,
762  (width << 2) + 1, (height << 2) + 1,
763  uvsrc_x, uvsrc_y,
764  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
765  srcU = uvbuf;
766  uvbuf += 9*s->uvlinesize;
767 
768  s->vdsp.emulated_edge_mc(uvbuf, srcV,
769  s->uvlinesize, s->uvlinesize,
770  (width << 2) + 1, (height << 2) + 1,
771  uvsrc_x, uvsrc_y,
772  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
773  srcV = uvbuf;
774  }
775  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
776  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
777 }
778 
779 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
780  const int xoff, const int yoff, int mv_off,
781  const int width, const int height, int dir)
782 {
783  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
784  r->rdsp.put_pixels_tab,
786 }
787 
788 static void rv4_weight(RV34DecContext *r)
789 {
791  r->tmp_b_block_y[0],
792  r->tmp_b_block_y[1],
793  r->weight1,
794  r->weight2,
795  r->s.linesize);
797  r->tmp_b_block_uv[0],
798  r->tmp_b_block_uv[2],
799  r->weight1,
800  r->weight2,
801  r->s.uvlinesize);
803  r->tmp_b_block_uv[1],
804  r->tmp_b_block_uv[3],
805  r->weight1,
806  r->weight2,
807  r->s.uvlinesize);
808 }
809 
810 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
811 {
812  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
813 
814  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
815  r->rdsp.put_pixels_tab,
817  if(!weighted){
818  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
819  r->rdsp.avg_pixels_tab,
821  }else{
822  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
823  r->rdsp.put_pixels_tab,
825  rv4_weight(r);
826  }
827 }
828 
830 {
831  int i, j;
832  int weighted = !r->rv30 && r->weight1 != 8192;
833 
834  for(j = 0; j < 2; j++)
835  for(i = 0; i < 2; i++){
836  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
837  weighted,
838  r->rdsp.put_pixels_tab,
840  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
841  weighted,
842  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
844  }
845  if(weighted)
846  rv4_weight(r);
847 }
848 
849 /** number of motion vectors in each macroblock type */
850 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
851 
852 /**
853  * Decode motion vector differences
854  * and perform motion vector reconstruction and motion compensation.
855  */
856 static int rv34_decode_mv(RV34DecContext *r, int block_type)
857 {
858  MpegEncContext *s = &r->s;
859  GetBitContext *gb = &s->gb;
860  int i, j, k, l;
861  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
862  int next_bt;
863 
864  memset(r->dmv, 0, sizeof(r->dmv));
865  for(i = 0; i < num_mvs[block_type]; i++){
866  r->dmv[i][0] = get_interleaved_se_golomb(gb);
867  r->dmv[i][1] = get_interleaved_se_golomb(gb);
868  if (r->dmv[i][0] == INVALID_VLC ||
869  r->dmv[i][1] == INVALID_VLC) {
870  r->dmv[i][0] = r->dmv[i][1] = 0;
871  return AVERROR_INVALIDDATA;
872  }
873  }
874  switch(block_type){
875  case RV34_MB_TYPE_INTRA:
877  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
878  return 0;
879  case RV34_MB_SKIP:
880  if(s->pict_type == AV_PICTURE_TYPE_P){
881  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
882  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
883  break;
884  }
885  case RV34_MB_B_DIRECT:
886  //surprisingly, it uses motion scheme from next reference frame
887  /* wait for the current mb row to be finished */
888  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
890 
891  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
892  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
893  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
894  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
895  }else
896  for(j = 0; j < 2; j++)
897  for(i = 0; i < 2; i++)
898  for(k = 0; k < 2; k++)
899  for(l = 0; l < 2; l++)
900  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
901  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
902  rv34_mc_2mv(r, block_type);
903  else
904  rv34_mc_2mv_skip(r);
905  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
906  break;
907  case RV34_MB_P_16x16:
908  case RV34_MB_P_MIX16x16:
909  rv34_pred_mv(r, block_type, 0, 0);
910  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
911  break;
912  case RV34_MB_B_FORWARD:
913  case RV34_MB_B_BACKWARD:
914  r->dmv[1][0] = r->dmv[0][0];
915  r->dmv[1][1] = r->dmv[0][1];
916  if(r->rv30)
917  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
918  else
919  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
920  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
921  break;
922  case RV34_MB_P_16x8:
923  case RV34_MB_P_8x16:
924  rv34_pred_mv(r, block_type, 0, 0);
925  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
926  if(block_type == RV34_MB_P_16x8){
927  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
928  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
929  }
930  if(block_type == RV34_MB_P_8x16){
931  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
932  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
933  }
934  break;
935  case RV34_MB_B_BIDIR:
936  rv34_pred_mv_b (r, block_type, 0);
937  rv34_pred_mv_b (r, block_type, 1);
938  rv34_mc_2mv (r, block_type);
939  break;
940  case RV34_MB_P_8x8:
941  for(i=0;i< 4;i++){
942  rv34_pred_mv(r, block_type, i, i);
943  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
944  }
945  break;
946  }
947 
948  return 0;
949 }
950 /** @} */ // mv group
951 
952 /**
953  * @name Macroblock reconstruction functions
954  * @{
955  */
956 /** mapping of RV30/40 intra prediction types to standard H.264 types */
957 static const int ittrans[9] = {
960 };
961 
962 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
963 static const int ittrans16[4] = {
965 };
966 
967 /**
968  * Perform 4x4 intra prediction.
969  */
970 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
971 {
972  uint8_t *prev = dst - stride + 4;
973  uint32_t topleft;
974 
975  if(!up && !left)
976  itype = DC_128_PRED;
977  else if(!up){
978  if(itype == VERT_PRED) itype = HOR_PRED;
979  if(itype == DC_PRED) itype = LEFT_DC_PRED;
980  }else if(!left){
981  if(itype == HOR_PRED) itype = VERT_PRED;
982  if(itype == DC_PRED) itype = TOP_DC_PRED;
984  }
985  if(!down){
987  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
988  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
989  }
990  if(!right && up){
991  topleft = dst[-stride + 3] * 0x01010101u;
992  prev = (uint8_t*)&topleft;
993  }
994  r->h.pred4x4[itype](dst, prev, stride);
995 }
996 
997 static inline int adjust_pred16(int itype, int up, int left)
998 {
999  if(!up && !left)
1000  itype = DC_128_PRED8x8;
1001  else if(!up){
1002  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1003  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1004  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1005  }else if(!left){
1006  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1007  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1008  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1009  }
1010  return itype;
1011 }
1012 
1013 static inline void rv34_process_block(RV34DecContext *r,
1014  uint8_t *pdst, int stride,
1015  int fc, int sc, int q_dc, int q_ac)
1016 {
1017  MpegEncContext *s = &r->s;
1018  int16_t *ptr = s->block[0];
1019  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1020  fc, sc, q_dc, q_ac, q_ac);
1021  if(has_ac){
1022  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1023  }else{
1024  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1025  ptr[0] = 0;
1026  }
1027 }
1028 
1029 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1030 {
1031  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1032  MpegEncContext *s = &r->s;
1033  GetBitContext *gb = &s->gb;
1034  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1035  q_ac = rv34_qscale_tab[s->qscale];
1036  uint8_t *dst = s->dest[0];
1037  int16_t *ptr = s->block[0];
1038  int i, j, itype, has_ac;
1039 
1040  memset(block16, 0, 16 * sizeof(*block16));
1041 
1042  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1043  if(has_ac)
1044  r->rdsp.rv34_inv_transform(block16);
1045  else
1046  r->rdsp.rv34_inv_transform_dc(block16);
1047 
1048  itype = ittrans16[intra_types[0]];
1049  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1050  r->h.pred16x16[itype](dst, s->linesize);
1051 
1052  for(j = 0; j < 4; j++){
1053  for(i = 0; i < 4; i++, cbp >>= 1){
1054  int dc = block16[i + j*4];
1055 
1056  if(cbp & 1){
1057  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1058  }else
1059  has_ac = 0;
1060 
1061  if(has_ac){
1062  ptr[0] = dc;
1063  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1064  }else
1065  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1066  }
1067 
1068  dst += 4*s->linesize;
1069  }
1070 
1071  itype = ittrans16[intra_types[0]];
1072  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1073  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1074 
1075  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1076  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1077 
1078  for(j = 1; j < 3; j++){
1079  dst = s->dest[j];
1080  r->h.pred8x8[itype](dst, s->uvlinesize);
1081  for(i = 0; i < 4; i++, cbp >>= 1){
1082  uint8_t *pdst;
1083  if(!(cbp & 1)) continue;
1084  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1085 
1086  rv34_process_block(r, pdst, s->uvlinesize,
1087  r->chroma_vlc, 1, q_dc, q_ac);
1088  }
1089  }
1090 }
1091 
1092 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1093 {
1094  MpegEncContext *s = &r->s;
1095  uint8_t *dst = s->dest[0];
1096  int avail[6*8] = {0};
1097  int i, j, k;
1098  int idx, q_ac, q_dc;
1099 
1100  // Set neighbour information.
1101  if(r->avail_cache[1])
1102  avail[0] = 1;
1103  if(r->avail_cache[2])
1104  avail[1] = avail[2] = 1;
1105  if(r->avail_cache[3])
1106  avail[3] = avail[4] = 1;
1107  if(r->avail_cache[4])
1108  avail[5] = 1;
1109  if(r->avail_cache[5])
1110  avail[8] = avail[16] = 1;
1111  if(r->avail_cache[9])
1112  avail[24] = avail[32] = 1;
1113 
1114  q_ac = rv34_qscale_tab[s->qscale];
1115  for(j = 0; j < 4; j++){
1116  idx = 9 + j*8;
1117  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1118  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1119  avail[idx] = 1;
1120  if(!(cbp & 1)) continue;
1121 
1122  rv34_process_block(r, dst, s->linesize,
1123  r->luma_vlc, 0, q_ac, q_ac);
1124  }
1125  dst += s->linesize * 4 - 4*4;
1126  intra_types += r->intra_types_stride;
1127  }
1128 
1129  intra_types -= r->intra_types_stride * 4;
1130 
1131  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1132  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1133 
1134  for(k = 0; k < 2; k++){
1135  dst = s->dest[1+k];
1136  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1137 
1138  for(j = 0; j < 2; j++){
1139  int* acache = r->avail_cache + 6 + j*4;
1140  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1141  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1142  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1143  acache[0] = 1;
1144 
1145  if(!(cbp&1)) continue;
1146 
1147  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1148  r->chroma_vlc, 1, q_dc, q_ac);
1149  }
1150 
1151  dst += 4*s->uvlinesize;
1152  }
1153  }
1154 }
1155 
/**
 * Return 1 if either component of the motion vector differs from that of
 * the neighbour at -step by more than 3 (quarter-pel units), which marks
 * the edge between them for deblocking.
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    int d;
    d = motion_val[0][0] - motion_val[-step][0];
    if(d < -3 || d > 3)
        return 1;
    d = motion_val[0][1] - motion_val[-step][1];
    if(d < -3 || d > 3)
        return 1;
    return 0;
}
1167 
1169 {
1170  MpegEncContext *s = &r->s;
1171  int hmvmask = 0, vmvmask = 0, i, j;
1172  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1173  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1174  for(j = 0; j < 16; j += 8){
1175  for(i = 0; i < 2; i++){
1176  if(is_mv_diff_gt_3(motion_val + i, 1))
1177  vmvmask |= 0x11 << (j + i*2);
1178  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1179  hmvmask |= 0x03 << (j + i*2);
1180  }
1181  motion_val += s->b8_stride;
1182  }
1183  if(s->first_slice_line)
1184  hmvmask &= ~0x000F;
1185  if(!s->mb_x)
1186  vmvmask &= ~0x1111;
1187  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1188  vmvmask |= (vmvmask & 0x4444) >> 1;
1189  hmvmask |= (hmvmask & 0x0F00) >> 4;
1190  if(s->mb_x)
1191  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1192  if(!s->first_slice_line)
1193  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1194  }
1195  return hmvmask | vmvmask;
1196 }
1197 
1198 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1199 {
1200  MpegEncContext *s = &r->s;
1201  GetBitContext *gb = &s->gb;
1202  uint8_t *dst = s->dest[0];
1203  int16_t *ptr = s->block[0];
1204  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1205  int cbp, cbp2;
1206  int q_dc, q_ac, has_ac;
1207  int i, j;
1208  int dist;
1209 
1210  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1211  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1212  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1213  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1214  if(s->mb_x && dist)
1215  r->avail_cache[5] =
1216  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1217  if(dist >= s->mb_width)
1218  r->avail_cache[2] =
1219  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1220  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1221  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1222  if(s->mb_x && dist > s->mb_width)
1223  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1224 
1225  s->qscale = r->si.quant;
1226  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1227  r->cbp_luma [mb_pos] = cbp;
1228  r->cbp_chroma[mb_pos] = cbp >> 16;
1229  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1230  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1231 
1232  if(cbp == -1)
1233  return -1;
1234 
1235  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1236  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1237  else rv34_output_intra(r, intra_types, cbp);
1238  return 0;
1239  }
1240 
1241  if(r->is16){
1242  // Only for RV34_MB_P_MIX16x16
1243  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1244  memset(block16, 0, 16 * sizeof(*block16));
1245  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1246  q_ac = rv34_qscale_tab[s->qscale];
1247  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1248  r->rdsp.rv34_inv_transform(block16);
1249  else
1250  r->rdsp.rv34_inv_transform_dc(block16);
1251 
1252  q_ac = rv34_qscale_tab[s->qscale];
1253 
1254  for(j = 0; j < 4; j++){
1255  for(i = 0; i < 4; i++, cbp >>= 1){
1256  int dc = block16[i + j*4];
1257 
1258  if(cbp & 1){
1259  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1260  }else
1261  has_ac = 0;
1262 
1263  if(has_ac){
1264  ptr[0] = dc;
1265  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1266  }else
1267  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1268  }
1269 
1270  dst += 4*s->linesize;
1271  }
1272 
1273  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1274  }else{
1275  q_ac = rv34_qscale_tab[s->qscale];
1276 
1277  for(j = 0; j < 4; j++){
1278  for(i = 0; i < 4; i++, cbp >>= 1){
1279  if(!(cbp & 1)) continue;
1280 
1281  rv34_process_block(r, dst + 4*i, s->linesize,
1282  r->luma_vlc, 0, q_ac, q_ac);
1283  }
1284  dst += 4*s->linesize;
1285  }
1286  }
1287 
1288  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1289  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1290 
1291  for(j = 1; j < 3; j++){
1292  dst = s->dest[j];
1293  for(i = 0; i < 4; i++, cbp >>= 1){
1294  uint8_t *pdst;
1295  if(!(cbp & 1)) continue;
1296  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1297 
1298  rv34_process_block(r, pdst, s->uvlinesize,
1299  r->chroma_vlc, 1, q_dc, q_ac);
1300  }
1301  }
1302 
1303  return 0;
1304 }
1305 
1306 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1307 {
1308  MpegEncContext *s = &r->s;
1309  int cbp, dist;
1310  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1311 
1312  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1313  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1314  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1315  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1316  if(s->mb_x && dist)
1317  r->avail_cache[5] =
1318  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1319  if(dist >= s->mb_width)
1320  r->avail_cache[2] =
1321  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1322  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1323  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1324  if(s->mb_x && dist > s->mb_width)
1325  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1326 
1327  s->qscale = r->si.quant;
1328  cbp = rv34_decode_intra_mb_header(r, intra_types);
1329  r->cbp_luma [mb_pos] = cbp;
1330  r->cbp_chroma[mb_pos] = cbp >> 16;
1331  r->deblock_coefs[mb_pos] = 0xFFFF;
1332  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1333 
1334  if(cbp == -1)
1335  return -1;
1336 
1337  if(r->is16){
1338  rv34_output_i16x16(r, intra_types, cbp);
1339  return 0;
1340  }
1341 
1342  rv34_output_intra(r, intra_types, cbp);
1343  return 0;
1344 }
1345 
1347 {
1348  int bits;
1349  if(s->mb_y >= s->mb_height)
1350  return 1;
1351  if(!s->mb_num_left)
1352  return 1;
1353  if(r->s.mb_skip_run > 1)
1354  return 0;
1355  bits = get_bits_left(&s->gb);
1356  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1357  return 1;
1358  return 0;
1359 }
1360 
1361 
1363 {
1365  r->intra_types = NULL;
1367  av_freep(&r->mb_type);
1368  av_freep(&r->cbp_luma);
1369  av_freep(&r->cbp_chroma);
1370  av_freep(&r->deblock_coefs);
1371 }
1372 
1373 
1375 {
1376  r->intra_types_stride = r->s.mb_width * 4 + 4;
1377 
1378  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1379  sizeof(*r->cbp_chroma));
1380  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1381  sizeof(*r->cbp_luma));
1383  sizeof(*r->deblock_coefs));
1385  sizeof(*r->intra_types_hist));
1386  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1387  sizeof(*r->mb_type));
1388 
1389  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1390  r->intra_types_hist && r->mb_type)) {
1391  rv34_decoder_free(r);
1392  return AVERROR(ENOMEM);
1393  }
1394 
1396 
1397  return 0;
1398 }
1399 
1400 
1402 {
1403  rv34_decoder_free(r);
1404  return rv34_decoder_alloc(r);
1405 }
1406 
1407 
1408 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1409 {
1410  MpegEncContext *s = &r->s;
1411  GetBitContext *gb = &s->gb;
1412  int mb_pos, slice_type;
1413  int res;
1414 
1415  init_get_bits(&r->s.gb, buf, buf_size*8);
1416  res = r->parse_slice_header(r, gb, &r->si);
1417  if(res < 0){
1418  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1419  return -1;
1420  }
1421 
1422  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1423  if (slice_type != s->pict_type) {
1424  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1425  return AVERROR_INVALIDDATA;
1426  }
1427  if (s->width != r->si.width || s->height != r->si.height) {
1428  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1429  return AVERROR_INVALIDDATA;
1430  }
1431 
1432  r->si.end = end;
1433  s->qscale = r->si.quant;
1434  s->mb_num_left = r->si.end - r->si.start;
1435  r->s.mb_skip_run = 0;
1436 
1437  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1438  if(r->si.start != mb_pos){
1439  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1440  s->mb_x = r->si.start % s->mb_width;
1441  s->mb_y = r->si.start / s->mb_width;
1442  }
1443  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1444  s->first_slice_line = 1;
1445  s->resync_mb_x = s->mb_x;
1446  s->resync_mb_y = s->mb_y;
1447 
1449  while(!check_slice_end(r, s)) {
1451 
1452  if(r->si.type)
1453  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1454  else
1455  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1456  if(res < 0){
1457  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1458  return -1;
1459  }
1460  if (++s->mb_x == s->mb_width) {
1461  s->mb_x = 0;
1462  s->mb_y++;
1464 
1465  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1466  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1467 
1468  if(r->loop_filter && s->mb_y >= 2)
1469  r->loop_filter(r, s->mb_y - 2);
1470 
1471  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1473  s->mb_y - 2, 0);
1474 
1475  }
1476  if(s->mb_x == s->resync_mb_x)
1477  s->first_slice_line=0;
1478  s->mb_num_left--;
1479  }
1480  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1481 
1482  return s->mb_y == s->mb_height;
1483 }
1484 
1485 /** @} */ // reconstruction group end
1486 
1487 /**
1488  * Initialize decoder.
1489  */
1491 {
1492  RV34DecContext *r = avctx->priv_data;
1493  MpegEncContext *s = &r->s;
1494  int ret;
1495 
1497  ff_mpv_decode_init(s, avctx);
1498  s->out_format = FMT_H263;
1499 
1500  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1501  avctx->has_b_frames = 1;
1502  s->low_delay = 0;
1503 
1504  ff_mpv_idct_init(s);
1505  if ((ret = ff_mpv_common_init(s)) < 0)
1506  return ret;
1507 
1508  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1509 
1510 #if CONFIG_RV30_DECODER
1511  if (avctx->codec_id == AV_CODEC_ID_RV30)
1512  ff_rv30dsp_init(&r->rdsp);
1513 #endif
1514 #if CONFIG_RV40_DECODER
1515  if (avctx->codec_id == AV_CODEC_ID_RV40)
1516  ff_rv40dsp_init(&r->rdsp);
1517 #endif
1518 
1519  if ((ret = rv34_decoder_alloc(r)) < 0) {
1520  ff_mpv_common_end(&r->s);
1521  return ret;
1522  }
1523 
1524  if(!intra_vlcs[0].cbppattern[0].bits)
1525  rv34_init_tables();
1526 
1527  return 0;
1528 }
1529 
1531 {
1532  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1533  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1534  int err;
1535 
1536  if (dst == src || !s1->context_initialized)
1537  return 0;
1538 
1539  if (s->height != s1->height || s->width != s1->width) {
1540  s->height = s1->height;
1541  s->width = s1->width;
1542  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1543  return err;
1544  if ((err = rv34_decoder_realloc(r)) < 0)
1545  return err;
1546  }
1547 
1548  r->cur_pts = r1->cur_pts;
1549  r->last_pts = r1->last_pts;
1550  r->next_pts = r1->next_pts;
1551 
1552  memset(&r->si, 0, sizeof(r->si));
1553 
1554  // Do no call ff_mpeg_update_thread_context on a partially initialized
1555  // decoder context.
1556  if (!s1->context_initialized)
1557  return 0;
1558 
1559  return ff_mpeg_update_thread_context(dst, src);
1560 }
1561 
1562 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1563 {
1564  if (n < slice_count) {
1565  if(avctx->slice_count) return avctx->slice_offset[n];
1566  else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1567  } else
1568  return buf_size;
1569 }
1570 
1571 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1572 {
1573  RV34DecContext *r = avctx->priv_data;
1574  MpegEncContext *s = &r->s;
1575  int got_picture = 0, ret;
1576 
1577  ff_er_frame_end(&s->er);
1578  ff_mpv_frame_end(s);
1579  s->mb_num_left = 0;
1580 
1581  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1583 
1584  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1585  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1586  return ret;
1589  got_picture = 1;
1590  } else if (s->last_picture_ptr) {
1591  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1592  return ret;
1595  got_picture = 1;
1596  }
1597 
1598  return got_picture;
1599 }
1600 
1601 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1602 {
1603  // attempt to keep aspect during typical resolution switches
1604  if (!sar.num)
1605  sar = (AVRational){1, 1};
1606 
1607  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1608  return sar;
1609 }
1610 
1612  void *data, int *got_picture_ptr,
1613  AVPacket *avpkt)
1614 {
1615  const uint8_t *buf = avpkt->data;
1616  int buf_size = avpkt->size;
1617  RV34DecContext *r = avctx->priv_data;
1618  MpegEncContext *s = &r->s;
1619  AVFrame *pict = data;
1620  SliceInfo si;
1621  int i, ret;
1622  int slice_count;
1623  const uint8_t *slices_hdr = NULL;
1624  int last = 0;
1625  int faulty_b = 0;
1626  int offset;
1627 
1628  /* no supplementary picture */
1629  if (buf_size == 0) {
1630  /* special case for last picture */
1631  if (s->low_delay==0 && s->next_picture_ptr) {
1632  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1633  return ret;
1634  s->next_picture_ptr = NULL;
1635 
1636  *got_picture_ptr = 1;
1637  }
1638  return 0;
1639  }
1640 
1641  if(!avctx->slice_count){
1642  slice_count = (*buf++) + 1;
1643  slices_hdr = buf + 4;
1644  buf += 8 * slice_count;
1645  buf_size -= 1 + 8 * slice_count;
1646  }else
1647  slice_count = avctx->slice_count;
1648 
1649  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1650  //parse first slice header to check whether this frame can be decoded
1651  if(offset < 0 || offset > buf_size){
1652  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1653  return AVERROR_INVALIDDATA;
1654  }
1655  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1656  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1657  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1658  return AVERROR_INVALIDDATA;
1659  }
1660  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1661  si.type == AV_PICTURE_TYPE_B) {
1662  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1663  "reference data.\n");
1664  faulty_b = 1;
1665  }
1666  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1667  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1668  || avctx->skip_frame >= AVDISCARD_ALL)
1669  return avpkt->size;
1670 
1671  /* first slice */
1672  if (si.start == 0) {
1673  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1674  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1675  s->mb_num_left);
1676  ff_er_frame_end(&s->er);
1677  ff_mpv_frame_end(s);
1678  }
1679 
1680  if (s->width != si.width || s->height != si.height) {
1681  int err;
1682 
1683  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1684  si.width, si.height);
1685 
1686  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1687  return AVERROR_INVALIDDATA;
1688 
1690  s->width, s->height, s->avctx->sample_aspect_ratio,
1691  si.width, si.height);
1692  s->width = si.width;
1693  s->height = si.height;
1694 
1695  err = ff_set_dimensions(s->avctx, s->width, s->height);
1696  if (err < 0)
1697  return err;
1698 
1699  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1700  return err;
1701  if ((err = rv34_decoder_realloc(r)) < 0)
1702  return err;
1703  }
1704  if (faulty_b)
1705  return AVERROR_INVALIDDATA;
1706  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1707  if (ff_mpv_frame_start(s, s->avctx) < 0)
1708  return -1;
1710  if (!r->tmp_b_block_base) {
1711  int i;
1712 
1713  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1714  for (i = 0; i < 2; i++)
1715  r->tmp_b_block_y[i] = r->tmp_b_block_base
1716  + i * 16 * s->linesize;
1717  for (i = 0; i < 4; i++)
1718  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1719  + (i >> 1) * 8 * s->uvlinesize
1720  + (i & 1) * 16;
1721  }
1722  r->cur_pts = si.pts;
1723  if (s->pict_type != AV_PICTURE_TYPE_B) {
1724  r->last_pts = r->next_pts;
1725  r->next_pts = r->cur_pts;
1726  } else {
1727  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1728  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1729  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1730 
1731  if(!refdist){
1732  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1733  r->scaled_weight = 0;
1734  }else{
1735  if (FFMAX(dist0, dist1) > refdist)
1736  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1737 
1738  r->mv_weight1 = (dist0 << 14) / refdist;
1739  r->mv_weight2 = (dist1 << 14) / refdist;
1740  if((r->mv_weight1|r->mv_weight2) & 511){
1741  r->weight1 = r->mv_weight1;
1742  r->weight2 = r->mv_weight2;
1743  r->scaled_weight = 0;
1744  }else{
1745  r->weight1 = r->mv_weight1 >> 9;
1746  r->weight2 = r->mv_weight2 >> 9;
1747  r->scaled_weight = 1;
1748  }
1749  }
1750  }
1751  s->mb_x = s->mb_y = 0;
1753  } else if (HAVE_THREADS &&
1755  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1756  "multithreading mode (start MB is %d).\n", si.start);
1757  return AVERROR_INVALIDDATA;
1758  }
1759 
1760  for(i = 0; i < slice_count; i++){
1761  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1762  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1763  int size;
1764 
1765  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1766  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1767  break;
1768  }
1769  size = offset1 - offset;
1770 
1771  r->si.end = s->mb_width * s->mb_height;
1772  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1773 
1774  if(i+1 < slice_count){
1775  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1776  if (offset2 < offset1 || offset2 > buf_size) {
1777  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1778  break;
1779  }
1780  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1781  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1782  size = offset2 - offset;
1783  }else
1784  r->si.end = si.start;
1785  }
1786  av_assert0 (size >= 0 && size <= buf_size - offset);
1787  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1788  if(last)
1789  break;
1790  }
1791 
1792  if (s->current_picture_ptr) {
1793  if (last) {
1794  if(r->loop_filter)
1795  r->loop_filter(r, s->mb_height - 1);
1796 
1797  ret = finish_frame(avctx, pict);
1798  if (ret < 0)
1799  return ret;
1800  *got_picture_ptr = ret;
1801  } else if (HAVE_THREADS &&
1803  av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
1804  /* always mark the current frame as finished, frame-mt supports
1805  * only complete frames */
1806  ff_er_frame_end(&s->er);
1807  ff_mpv_frame_end(s);
1808  s->mb_num_left = 0;
1810  return AVERROR_INVALIDDATA;
1811  }
1812  }
1813 
1814  return avpkt->size;
1815 }
1816 
1818 {
1819  RV34DecContext *r = avctx->priv_data;
1820 
1821  ff_mpv_common_end(&r->s);
1822  rv34_decoder_free(r);
1823 
1824  return 0;
1825 }
qpel_mc_func put_pixels_tab[4][16]
Definition: rv34dsp.h:58
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:54
void ff_rv40dsp_init(RV34DSPContext *c)
Definition: rv40dsp.c:620
#define VERT_PRED8x8
Definition: h264pred.h:70
#define NULL
Definition: coverity.c:32
int vlc_set
VLCs used for this slice.
Definition: rv34.h:76
VLC second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:67
discard all frames except keyframes
Definition: avcodec.h:235
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2254
Definition: vp9.h:47
#define MB_TYPE_L1
Definition: mpegutils.h:68
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
#define DC_128_PRED8x8
Definition: h264pred.h:76
int last_pts
Definition: rv34.h:107
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:51
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1029
uint8_t * tmp_b_block_y[2]
temporary blocks for RV4 weighted MC
Definition: rv34.h:122
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:59
uint32_t avail_cache[3 *4]
8x8 block available flags (for MV prediction)
Definition: rv34.h:119
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define ER_MB_END
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:957
B-frame macroblock, forward prediction.
Definition: rv34.h:47
int dmv[4][2]
differential motion vectors for the current macroblock
Definition: rv34.h:102
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:53
MpegEncContext s
Definition: rv34.h:85
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:284
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:351
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:132
void ff_er_frame_end(ERContext *s)
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:275
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
int height
coded height
Definition: rv34.h:79
int num
Numerator.
Definition: rational.h:59
int size
Definition: packet.h:364
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:50
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:910
#define INVALID_VLC
Definition: golomb.h:38
#define MB_TYPE_INTRA
Definition: mpegutils.h:73
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation...
Definition: rv34.c:856
#define MB_TYPE_16x8
Definition: mpegutils.h:55
GLint GLenum type
Definition: opengl_enc.c:104
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
mpegvideo header.
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:528
VLC cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:64
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1571
int weight2
B-frame distance fractions (0.14) used in motion compensation.
Definition: rv34.h:109
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:513
RV30 and RV40 decoder common data declarations.
discard all
Definition: avcodec.h:236
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1408
#define SUINT
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:970
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
int start
Definition: rv34.h:77
#define HOR_PRED8x8
Definition: h264pred.h:69
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:54
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
int qscale
QP.
Definition: mpegvideo.h:204
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
const uint8_t * luma_dc_quant_p
luma subblock DC quantizer for interframes
Definition: rv34.h:91
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:262
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2009
#define PLANE_PRED8x8
Definition: h264pred.h:71
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint8_t * tmp_b_block_base
Definition: rv34.h:124
int mb_num_left
number of MBs left in this video packet (for partitioned Slices only)
Definition: mpegvideo.h:359
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
ScratchpadContext sc
Definition: mpegvideo.h:202
uint8_t
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1013
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define DC_PRED8x8
Definition: h264pred.h:68
int scaled_weight
Definition: rv34.h:108
uint16_t * cbp_luma
CBP values for luma subblocks.
Definition: rv34.h:114
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:119
enum OutputFormat out_format
output format
Definition: mpegvideo.h:104
static const int chroma_coeffs[3]
Definition: rv34.c:646
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:829
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:220
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1035
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
Multithreading support functions.
Definition: vp9.h:46
int width
coded width
Definition: rv34.h:78
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
#define ER_MB_ERROR
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:36
#define MB_TYPE_16x16
Definition: mpegutils.h:54
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:91
rv40_weight_func rv40_weight_pixels_tab[2][2]
Biweight functions, first dimension is transform size (16/8), second is whether the weight is prescal...
Definition: rv34dsp.h:67
uint16_t * deblock_coefs
deblock coefficients for each macroblock
Definition: rv34.h:116
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
quarterpel DSP functions
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:452
#define height
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:70
uint8_t * data
Definition: packet.h:363
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1156
Skipped block.
Definition: rv34.h:49
static VLC_TYPE table_data[117592][2]
Definition: rv34.c:100
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:129
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
ptrdiff_t size
Definition: opengl_enc.c:100
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1092
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
rv34_idct_add_func rv34_idct_add
Definition: rv34dsp.h:70
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:52
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:743
static const uint16_t table[]
Definition: prosumer.c:206
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:256
int slice_count
slice count
Definition: avcodec.h:885
ThreadFrame tf
Definition: mpegpicture.h:47
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
int quant
quantizer used for this slice
Definition: rv34.h:75
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:550
static const int table_offs[]
Definition: rv34.c:79
qpel_mc_func avg_pixels_tab[4][16]
Definition: rv34dsp.h:59
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:821
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:788
#define MB_TYPE_8x16
Definition: mpegutils.h:56
#define chroma_mc(a)
Definition: vc1dsp.c:783
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:963
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
static const uint16_t mask[17]
Definition: lzw.c:38
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
rv34_idct_dc_add_func rv34_idct_dc_add
Definition: rv34dsp.h:71
av_cold void ff_rv30dsp_init(RV34DSPContext *c)
Definition: rv30dsp.c:265
#define B
Definition: huffyuvdsp.h:32
ERContext er
Definition: mpegvideo.h:569
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1809
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:810
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:464
const char * r
Definition: vf_curves.c:114
int luma_vlc
which VLC set will be used for decoding of luma blocks
Definition: rv34.h:99
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1601
simple assert() macros that are a bit more flexible than ISO C assert().
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:303
#define IS_SKIP(a)
Definition: mpegutils.h:81
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1401
uint8_t bits
Definition: vp3data.h:202
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:406
GetBitContext gb
Definition: mpegvideo.h:451
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1110
#define FFMAX(a, b)
Definition: common.h:94
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
rv34_inv_transform_func rv34_inv_transform_dc
Definition: rv34dsp.h:69
VLC tables used by the decoder.
Definition: rv34.h:63
static float mul(float src0, float src1)
Definition: vlc.h:26
int end
start and end macroblocks of the slice
Definition: rv34.h:77
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1168
common internal API header
useful rectangle filling function
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1562
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:47
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
int(* parse_slice_header)(struct RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
Definition: rv34.h:126
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:1426
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:44
#define IS_16X8(a)
Definition: mpegutils.h:87
#define Y
Definition: boxblur.h:38
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:548
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1801
#define FFMIN(a, b)
Definition: common.h:96
int * mb_type
internal macroblock types
Definition: rv34.h:97
#define width
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:997
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
Picture.
Definition: mpegpicture.h:45
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:518
H264PredContext h
functions for 4x4 and 16x16 intra block prediction
Definition: rv34.h:94
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:339
VLC coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:69
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1374
VLC first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:66
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
int mv_weight1
Definition: rv34.h:110
#define s(width, name)
Definition: cbs_vp9.c:257
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:491
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:667
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:779
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
int bits
Definition: vlc.h:27
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
RV30/40 VLC tables.
int table_allocated
Definition: vlc.h:29
int(* decode_mb_info)(struct RV34DecContext *r)
Definition: rv34.h:127
#define MB_TYPE_8x8
Definition: mpegutils.h:57
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:436
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:663
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1306
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
essential slice information
Definition: rv34.h:73
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:134
enum AVCodecID codec_id
Definition: avcodec.h:541
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:386
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:747
main external API structure.
Definition: avcodec.h:531
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:449
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
RV34VLC * cur_vlcs
VLC set used for current frame decoding.
Definition: rv34.h:93
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:100
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
SliceInfo si
current slice information
Definition: rv34.h:95
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:1419
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:46
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1362
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:65
Rational number (pair of numerator and denominator).
Definition: rational.h:58
struct AVFrame * f
Definition: mpegpicture.h:46
#define IS_8X16(a)
Definition: mpegutils.h:88
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:142
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1490
#define mid_pred
Definition: mathops.h:97
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:135
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1611
#define s1
Definition: regdef.h:38
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1187
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1198
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
int intra_types_stride
block types array stride
Definition: rv34.h:89
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
miscellaneous RV30/40 tables
int(* decode_intra_types)(struct RV34DecContext *r, GetBitContext *gb, int8_t *dst)
Definition: rv34.h:128
const uint8_t * quant
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:600
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1346
int is16
current block has additional 16x16 specific features or not
Definition: rv34.h:101
#define flags(name, subs,...)
Definition: cbs_av1.c:560
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
int8_t * intra_types
block types
Definition: rv34.h:88
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
P-frame macroblock, one motion frame.
Definition: rv34.h:45
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:131
int cur_pts
Definition: rv34.h:107
Definition: vp9.h:48
static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:185
MpegEncContext.
Definition: mpegvideo.h:81
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:183
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1817
int8_t * qscale_table
Definition: mpegpicture.h:50
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
int weight1
Definition: rv34.h:109
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
discard all non reference
Definition: avcodec.h:232
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
#define MB_TYPE_L0L1
Definition: mpegutils.h:69
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:455
uint8_t * tmp_b_block_uv[4]
Definition: rv34.h:123
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:130
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo.c:672
int mv_weight2
Definition: rv34.h:110
uint8_t * dest[3]
Definition: mpegvideo.h:295
B-frame macroblock, backward prediction.
Definition: rv34.h:48
#define INIT_VLC_USE_NEW_STATIC
Definition: vlc.h:60
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:124
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1530
static const uint8_t shifts[2][12]
Definition: camellia.c:174
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:182
Bi-dir predicted.
Definition: avutil.h:276
VLC third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:68
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:850
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:327
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
#define IS_INTRA(x, y)
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
decoder context
Definition: rv34.h:84
void * priv_data
Definition: avcodec.h:558
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:884
VideoDSPContext vdsp
Definition: mpegvideo.h:236
#define IS_8X8(a)
Definition: mpegutils.h:89
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1411
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:511
int block_type
current block type
Definition: rv34.h:98
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, const int num)
Generate VLC from codeword lengths.
Definition: rv34.c:110
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
int next_pts
Definition: rv34.h:107
const uint8_t * luma_dc_quant_i
luma subblock DC quantizer for intraframes
Definition: rv34.h:90
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int * slice_offset
slice offsets in the frame in bytes
Definition: avcodec.h:901
int8_t * intra_types_hist
old block types, used for prediction
Definition: rv34.h:87
rv34_inv_transform_func rv34_inv_transform
Definition: rv34dsp.h:68
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:131
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:238
#define av_freep(p)
int type
slice type (intra, inter)
Definition: rv34.h:74
h264_chroma_mc_func avg_chroma_pixels_tab[3]
Definition: rv34dsp.h:61
#define VLC_TYPE
Definition: vlc.h:24
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:218
#define stride
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:92
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:70
int rv30
indicates which RV variant is currently decoded
Definition: rv34.h:104
exp golomb vlc stuff
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
h264_chroma_mc_func put_chroma_pixels_tab[3]
Definition: rv34dsp.h:60
Intra macroblock.
Definition: rv34.h:43
void(* loop_filter)(struct RV34DecContext *r, int row)
Definition: rv34.h:129
int chroma_vlc
which VLC set will be used for decoding of chroma blocks
Definition: rv34.h:100
#define MB_TYPE_L0
Definition: mpegutils.h:67
for(j=16;j >0;--j)
RV34DSPContext rdsp
Definition: rv34.h:86
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
int pts
frame timestamp
Definition: rv34.h:80
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
#define V
Definition: avdct.c:30
uint8_t * cbp_chroma
CBP values for chroma subblocks.
Definition: rv34.h:115