rv34.c
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/internal.h"
29 
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
34 #include "golomb.h"
35 #include "internal.h"
36 #include "mathops.h"
37 #include "mpeg_er.h"
38 #include "qpeldsp.h"
39 #include "rectangle.h"
40 #include "thread.h"
41 
42 #include "rv34vlc.h"
43 #include "rv34data.h"
44 #include "rv34.h"
45 
46 static inline void ZERO8x2(void* dst, int stride)
47 {
48  fill_rectangle(dst, 1, 2, stride, 0, 4);
49  fill_rectangle(((uint8_t*)(dst))+4, 1, 2, stride, 0, 4);
50 }
51 
52 /** translation of RV30/40 macroblock types to lavc ones */
53 static const int rv34_mb_type_to_lavc[12] = {
66 };
67 
68 
 69 static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
 70 
71 static int rv34_decode_mv(RV34DecContext *r, int block_type);
72 
73 /**
74  * @name RV30/40 VLC generating functions
75  * @{
76  */
77 
78 static const int table_offs[] = {
79  0, 1818, 3622, 4144, 4698, 5234, 5804, 5868, 5900, 5932,
80  5996, 6252, 6316, 6348, 6380, 7674, 8944, 10274, 11668, 12250,
81  14060, 15846, 16372, 16962, 17512, 18148, 18180, 18212, 18244, 18308,
82  18564, 18628, 18660, 18692, 20036, 21314, 22648, 23968, 24614, 26384,
83  28190, 28736, 29366, 29938, 30608, 30640, 30672, 30704, 30768, 31024,
84  31088, 31120, 31184, 32570, 33898, 35236, 36644, 37286, 39020, 40802,
85  41368, 42052, 42692, 43348, 43380, 43412, 43444, 43476, 43604, 43668,
86  43700, 43732, 45100, 46430, 47778, 49160, 49802, 51550, 53340, 53972,
87  54648, 55348, 55994, 56122, 56154, 56186, 56218, 56346, 56410, 56442,
88  56474, 57878, 59290, 60636, 62036, 62682, 64460, 64524, 64588, 64716,
89  64844, 66076, 67466, 67978, 68542, 69064, 69648, 70296, 72010, 72074,
90  72138, 72202, 72330, 73572, 74936, 75454, 76030, 76566, 77176, 77822,
91  79582, 79646, 79678, 79742, 79870, 81180, 82536, 83064, 83672, 84242,
92  84934, 85576, 87384, 87448, 87480, 87544, 87672, 88982, 90340, 90902,
93  91598, 92182, 92846, 93488, 95246, 95278, 95310, 95374, 95502, 96878,
94  98266, 98848, 99542, 100234, 100884, 101524, 103320, 103352, 103384, 103416,
95  103480, 104874, 106222, 106910, 107584, 108258, 108902, 109544, 111366, 111398,
96  111430, 111462, 111494, 112878, 114320, 114988, 115660, 116310, 116950, 117592
97 };
98 
99 static VLC_TYPE table_data[117592][2];
100 
101 /**
102  * Generate VLC from codeword lengths.
103  * @param bits codeword lengths (zeroes are accepted)
104  * @param size length of input data
105  * @param vlc output VLC
106  * @param insyms symbols for input codes (NULL for default ones)
107  * @param num VLC table number (for static initialization)
108  */
109 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms,
110  const int num)
111 {
112  int i;
113  int counts[17] = {0}, codes[17];
114  uint16_t cw[MAX_VLC_SIZE], syms[MAX_VLC_SIZE];
 115  uint8_t bits2[MAX_VLC_SIZE];
 116  int maxbits = 0, realsize = 0;
117 
118  for(i = 0; i < size; i++){
119  if(bits[i]){
120  bits2[realsize] = bits[i];
121  syms[realsize] = insyms ? insyms[i] : i;
122  realsize++;
123  maxbits = FFMAX(maxbits, bits[i]);
124  counts[bits[i]]++;
125  }
126  }
127 
128  codes[0] = 0;
129  for(i = 0; i < 16; i++)
130  codes[i+1] = (codes[i] + counts[i]) << 1;
131  for(i = 0; i < realsize; i++)
132  cw[i] = codes[bits2[i]]++;
133 
134  vlc->table = &table_data[table_offs[num]];
135  vlc->table_allocated = table_offs[num + 1] - table_offs[num];
136  ff_init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize,
137  bits2, 1, 1,
138  cw, 2, 2,
139  syms, 2, 2, INIT_VLC_USE_NEW_STATIC);
140 }
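/* How the canonical codes above come together, on a small example with code
 * lengths {1, 2, 2}: counts[1] = 1, counts[2] = 2, so
 *   codes[1] = (codes[0] + counts[0]) << 1 = 0   -> the single 1-bit code is '0'
 *   codes[2] = (codes[1] + counts[1]) << 1 = 2   -> the 2-bit codes are '10' and '11'
 * Each symbol of a given length then takes consecutive codewords (cw[i] = codes[len]++),
 * and ff_init_vlc_sparse() turns the (length, code, symbol) triples into a lookup
 * table indexed with at most 9 bits, matching the get_vlc2(gb, table, 9, 2) calls
 * used throughout this file. */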
141 
142 /**
143  * Initialize all tables.
144  */
145 static av_cold void rv34_init_tables(void)
146 {
147  int i, j, k;
148 
149  for(i = 0; i < NUM_INTRA_TABLES; i++){
150  for(j = 0; j < 2; j++){
151  rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL, 19*i + 0 + j);
152  rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL, 19*i + 2 + j);
153  rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL, 19*i + 4 + j);
154  for(k = 0; k < 4; k++){
155  rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code, 19*i + 6 + j*4 + k);
156  }
157  }
158  for(j = 0; j < 4; j++){
159  rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL, 19*i + 14 + j);
160  }
161  rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL, 19*i + 18);
162  }
163 
164  for(i = 0; i < NUM_INTER_TABLES; i++){
165  rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL, i*12 + 95);
166  for(j = 0; j < 4; j++){
167  rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code, i*12 + 96 + j);
168  }
169  for(j = 0; j < 2; j++){
170  rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL, i*12 + 100 + j);
171  rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL, i*12 + 102 + j);
172  rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL, i*12 + 104 + j);
173  }
174  rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL, i*12 + 106);
175  }
176 }
177 
178 /** @} */ // vlc group
179 
180 /**
181  * @name RV30/40 4x4 block decoding functions
182  * @{
183  */
184 
185 /**
186  * Decode coded block pattern.
187  */
188 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table)
189 {
190  int pattern, code, cbp=0;
191  int ones;
192  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
193  static const int shifts[4] = { 0, 2, 8, 10 };
194  const int *curshift = shifts;
195  int i, t, mask;
196 
197  code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2);
198  pattern = code & 0xF;
199  code >>= 4;
200 
201  ones = rv34_count_ones[pattern];
202 
203  for(mask = 8; mask; mask >>= 1, curshift++){
204  if(pattern & mask)
205  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
206  }
207 
208  for(i = 0; i < 4; i++){
209  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
210  if(t == 1)
211  cbp |= cbp_masks[get_bits1(gb)] << i;
212  if(t == 2)
213  cbp |= cbp_masks[2] << i;
214  }
215  return cbp;
216 }
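/* Layout of the CBP value built above: the low 16 bits hold one coded flag per
 * 4x4 luma block (the per-8x8 CBP VLC results are merged in through the shifts
 * {0, 2, 8, 10}), while the bits at 16+ and 20+ hold the chroma 4x4 flags set via
 * cbp_masks.  The upper part of the first VLC symbol is four base-3 digits
 * (unpacked through modulo_three_table): digit 0 = no chroma coded at that
 * position, 1 = exactly one plane coded (one extra bit picks which plane),
 * 2 = both planes coded. */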
217 
218 /**
219  * Get one coefficient value from the bitstream and store it.
220  */
221 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, VLC* vlc, int q)
222 {
223  if(coef){
224  if(coef == esc){
225  coef = get_vlc2(gb, vlc->table, 9, 2);
226  if(coef > 23){
227  coef -= 23;
228  coef = 22 + ((1 << coef) | get_bits(gb, coef));
229  }
230  coef += esc;
231  }
232  if(get_bits1(gb))
233  coef = -coef;
234  *dst = (coef*q + 8) >> 4;
235  }
236 }
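/* Coefficient levels use a small VLC plus an escape: when the level VLC value
 * exceeds 23, the excess selects how many extra raw bits follow and the level is
 * rebuilt as 22 + ((1 << n) | get_bits(gb, n)) before the escape threshold is
 * added back.  Dequantization is plain fixed point, dst = (coef * q + 8) >> 4,
 * i.e. multiplication by q/16 with rounding; e.g. coef = 2, q = 40 gives
 * (80 + 8) >> 4 = 5. */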
237 
238 /**
239  * Decode 2x2 subblock of coefficients.
240  */
241 static inline void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q)
242 {
 243  int flags = modulo_three_table[code];
 244 
245  decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
246  if(is_block2){
247  decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
248  decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
249  }else{
250  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
251  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
252  }
253  decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
254 }
255 
256 /**
257  * Decode a single coefficient.
258  */
259 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, VLC *vlc, int q)
260 {
261  int coeff = modulo_three_table[code] >> 6;
262  decode_coeff(dst, coeff, 3, gb, vlc, q);
263 }
264 
265 static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, VLC *vlc,
266  int q_dc, int q_ac1, int q_ac2)
267 {
 268  int flags = modulo_three_table[code];
 269 
270  decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
271  decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
272  decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
273  decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
274 }
275 
276 /**
277  * Decode coefficients for 4x4 block.
278  *
279  * This is done by filling 2x2 subblocks with decoded coefficients
280  * in this order (the same for subblocks and subblock coefficients):
 281  *  o--o
 282  *    /
 283  *   /
 284  *  o--o
285  */
286 
287 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
288 {
289  int code, pattern, has_ac = 1;
290 
291  code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2);
292 
293  pattern = code & 0x7;
294 
295  code >>= 3;
296 
297  if (modulo_three_table[code] & 0x3F) {
298  decode_subblock3(dst, code, gb, &rvlc->coefficient, q_dc, q_ac1, q_ac2);
299  } else {
300  decode_subblock1(dst, code, gb, &rvlc->coefficient, q_dc);
301  if (!pattern)
302  return 0;
303  has_ac = 0;
304  }
305 
306  if(pattern & 4){
307  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
308  decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
309  }
310  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
311  code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
312  decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
313  }
314  if(pattern & 1){
315  code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
316  decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
317  }
318  return has_ac | pattern;
319 }
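/* The return value (has_ac | pattern) is non-zero whenever the block may contain
 * AC coefficients; callers use it to choose rv34_idct_add() over the cheaper
 * rv34_idct_dc_add() path. */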
320 
321 /**
322  * @name RV30/40 bitstream parsing
323  * @{
324  */
325 
326 /**
327  * Decode starting slice position.
328  * @todo Maybe replace with ff_h263_decode_mba() ?
329  */
 330 int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
 331 {
332  int i;
333  for(i = 0; i < 5; i++)
334  if(rv34_mb_max_sizes[i] >= mb_size - 1)
335  break;
336  return rv34_mb_bits_sizes[i];
337 }
338 
339 /**
340  * Select VLC set for decoding from current quantizer, modifier and frame type.
341  */
342 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
343 {
344  if(mod == 2 && quant < 19) quant += 10;
345  else if(mod && quant < 26) quant += 5;
346  return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]]
347  : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]];
348 }
349 
350 /**
351  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
352  */
353 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
354 {
355  MpegEncContext *s = &r->s;
356  GetBitContext *gb = &s->gb;
357  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
358  int t;
359 
360  r->is16 = get_bits1(gb);
 361  if(r->is16){
 362  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
 363  r->block_type = RV34_MB_TYPE_INTRA16x16;
 364  t = get_bits(gb, 2);
365  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
366  r->luma_vlc = 2;
367  }else{
368  if(!r->rv30){
369  if(!get_bits1(gb))
370  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
371  }
 372  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
 373  r->block_type = RV34_MB_TYPE_INTRA;
 374  if(r->decode_intra_types(r, gb, intra_types) < 0)
375  return -1;
376  r->luma_vlc = 1;
377  }
378 
379  r->chroma_vlc = 0;
380  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
381 
382  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
383 }
384 
385 /**
386  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
387  */
388 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
389 {
390  MpegEncContext *s = &r->s;
391  GetBitContext *gb = &s->gb;
392  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
393  int i, t;
394 
395  r->block_type = r->decode_mb_info(r);
396  if(r->block_type == -1)
397  return -1;
 398  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
 399  r->mb_type[mb_pos] = r->block_type;
400  if(r->block_type == RV34_MB_SKIP){
401  if(s->pict_type == AV_PICTURE_TYPE_P)
402  r->mb_type[mb_pos] = RV34_MB_P_16x16;
403  if(s->pict_type == AV_PICTURE_TYPE_B)
404  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
405  }
406  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
407  if (rv34_decode_mv(r, r->block_type) < 0)
408  return -1;
409  if(r->block_type == RV34_MB_SKIP){
410  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
411  return 0;
412  }
413  r->chroma_vlc = 1;
414  r->luma_vlc = 0;
415 
416  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
417  if(r->is16){
418  t = get_bits(gb, 2);
419  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
420  r->luma_vlc = 2;
421  }else{
422  if(r->decode_intra_types(r, gb, intra_types) < 0)
423  return -1;
424  r->luma_vlc = 1;
425  }
426  r->chroma_vlc = 0;
427  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
428  }else{
429  for(i = 0; i < 16; i++)
430  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
431  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
432  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
433  r->is16 = 1;
434  r->chroma_vlc = 1;
435  r->luma_vlc = 2;
436  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
437  }
438  }
439 
440  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
441 }
442 
443 /** @} */ //bitstream functions
444 
445 /**
446  * @name motion vector related code (prediction, reconstruction, motion compensation)
447  * @{
448  */
449 
450 /** macroblock partition width in 8x8 blocks */
451 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };
452 
453 /** macroblock partition height in 8x8 blocks */
454 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };
455 
456 /** availability index for subblocks */
457 static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
458 
459 /**
460  * motion vector prediction
461  *
 462  * Motion prediction is performed for the block using median prediction of the
 463  * motion vectors from the left, top and top-right blocks; in corner cases
 464  * some other vectors may be used instead.
465  */
466 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
467 {
468  MpegEncContext *s = &r->s;
469  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
470  int A[2] = {0}, B[2], C[2];
471  int i, j;
472  int mx, my;
473  int* avail = r->avail_cache + avail_indexes[subblock_no];
474  int c_off = part_sizes_w[block_type];
475 
476  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
477  if(subblock_no == 3)
478  c_off = -1;
479 
480  if(avail[-1]){
481  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
482  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
483  }
484  if(avail[-4]){
485  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
486  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
487  }else{
488  B[0] = A[0];
489  B[1] = A[1];
490  }
491  if(!avail[c_off-4]){
492  if(avail[-4] && (avail[-1] || r->rv30)){
493  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
494  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
495  }else{
496  C[0] = A[0];
497  C[1] = A[1];
498  }
499  }else{
500  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
501  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
502  }
503  mx = mid_pred(A[0], B[0], C[0]);
504  my = mid_pred(A[1], B[1], C[1]);
505  mx += r->dmv[dmv_no][0];
506  my += r->dmv[dmv_no][1];
507  for(j = 0; j < part_sizes_h[block_type]; j++){
508  for(i = 0; i < part_sizes_w[block_type]; i++){
509  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
510  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
511  }
512  }
513 }
514 
515 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
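/* The temporal references carried in the slice header are 13-bit values that wrap
 * around at 8192, so differences are taken modulo 8192.  For example, a = 10 and
 * b = 8100 gives (10 - 8100 + 8192) & 0x1FFF = 102, the true forward distance
 * across the wrap. */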
516 
517 /**
518  * Calculate motion vector component that should be added for direct blocks.
519  */
520 static int calc_add_mv(RV34DecContext *r, int dir, int val)
521 {
522  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
523 
524  return (int)(val * (SUINT)mul + 0x2000) >> 14;
525 }
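/* mv_weight1/mv_weight2 are Q14 fractions of the temporal distance between the
 * surrounding references (set in ff_rv34_decode_frame() as (dist << 14) / refdist),
 * so this scales the co-located motion vector proportionally, with 0x2000 providing
 * rounding; the backward component (dir = 1) is negated because it points the other
 * way.  Example: refdist = 4, dist0 = 1 -> mul = 4096, val = 8 ->
 * (8*4096 + 0x2000) >> 14 = 2, i.e. one quarter of the original vector. */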
526 
527 /**
528  * Predict motion vector for B-frame macroblock.
529  */
530 static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
531  int A_avail, int B_avail, int C_avail,
532  int *mx, int *my)
533 {
534  if(A_avail + B_avail + C_avail != 3){
535  *mx = A[0] + B[0] + C[0];
536  *my = A[1] + B[1] + C[1];
537  if(A_avail + B_avail + C_avail == 2){
538  *mx /= 2;
539  *my /= 2;
540  }
541  }else{
542  *mx = mid_pred(A[0], B[0], C[0]);
543  *my = mid_pred(A[1], B[1], C[1]);
544  }
545 }
546 
547 /**
548  * motion vector prediction for B-frames
549  */
550 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
551 {
552  MpegEncContext *s = &r->s;
553  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
554  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
555  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
556  int has_A = 0, has_B = 0, has_C = 0;
557  int mx, my;
558  int i, j;
559  Picture *cur_pic = s->current_picture_ptr;
560  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
561  int type = cur_pic->mb_type[mb_pos];
562 
563  if((r->avail_cache[6-1] & type) & mask){
564  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
565  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
566  has_A = 1;
567  }
568  if((r->avail_cache[6-4] & type) & mask){
569  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
570  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
571  has_B = 1;
572  }
573  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
574  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
575  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
576  has_C = 1;
577  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
578  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
579  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
580  has_C = 1;
581  }
582 
583  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
584 
585  mx += r->dmv[dir][0];
586  my += r->dmv[dir][1];
587 
588  for(j = 0; j < 2; j++){
589  for(i = 0; i < 2; i++){
590  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
591  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
592  }
593  }
594  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
595  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
596  }
597 }
598 
599 /**
600  * motion vector prediction - RV3 version
601  */
602 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
603 {
604  MpegEncContext *s = &r->s;
605  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
606  int A[2] = {0}, B[2], C[2];
607  int i, j, k;
608  int mx, my;
609  int* avail = r->avail_cache + avail_indexes[0];
610 
611  if(avail[-1]){
612  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
613  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
614  }
615  if(avail[-4]){
616  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
617  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
618  }else{
619  B[0] = A[0];
620  B[1] = A[1];
621  }
622  if(!avail[-4 + 2]){
623  if(avail[-4] && (avail[-1])){
624  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
625  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
626  }else{
627  C[0] = A[0];
628  C[1] = A[1];
629  }
630  }else{
631  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
632  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
633  }
634  mx = mid_pred(A[0], B[0], C[0]);
635  my = mid_pred(A[1], B[1], C[1]);
636  mx += r->dmv[0][0];
637  my += r->dmv[0][1];
638  for(j = 0; j < 2; j++){
639  for(i = 0; i < 2; i++){
640  for(k = 0; k < 2; k++){
641  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
642  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
643  }
644  }
645  }
646 }
647 
648 static const int chroma_coeffs[3] = { 0, 3, 5 };
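/* In RV30 third-pel mode the chroma vector is the luma vector halved, which lands
 * on sixths of a luma pixel, i.e. thirds of a chroma pixel.  chroma_coeffs[] maps
 * the remainder {0, 1, 2} (0, 1/3, 2/3 of a chroma pel) onto the nearest positions
 * of the eighth-pel bilinear chroma filter: 0, 3/8 and 5/8. */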
649 
650 /**
651  * generic motion compensation function
652  *
653  * @param r decoder context
654  * @param block_type type of the current block
655  * @param xoff horizontal offset from the start of the current block
656  * @param yoff vertical offset from the start of the current block
657  * @param mv_off offset to the motion vector information
658  * @param width width of the current partition in 8x8 blocks
659  * @param height height of the current partition in 8x8 blocks
660  * @param dir motion compensation direction (i.e. from the last or the next reference frame)
661  * @param thirdpel motion vectors are specified in 1/3 of pixel
662  * @param qpel_mc a set of functions used to perform luma motion compensation
663  * @param chroma_mc a set of functions used to perform chroma motion compensation
664  */
665 static inline void rv34_mc(RV34DecContext *r, const int block_type,
666  const int xoff, const int yoff, int mv_off,
667  const int width, const int height, int dir,
668  const int thirdpel, int weighted,
669  qpel_mc_func (*qpel_mc)[16],
 670  h264_chroma_mc_func (*chroma_mc))
 671 {
672  MpegEncContext *s = &r->s;
673  uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
674  int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
675  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
676  int is16x16 = 1;
677  int emu = 0;
678 
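 /* The (v + (3 << 24)) / 3 - (1 << 24) expressions below implement division by 3
  * that rounds towards minus infinity: C integer division truncates towards zero,
  * so a large multiple of 3 is added to make the dividend positive and removed
  * again afterwards.  E.g. v = -7: 50331641 / 3 - (1 << 24) = 16777213 - 16777216 = -3,
  * and the matching "% 3" yields the non-negative remainder 2. */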
679  if(thirdpel){
680  int chroma_mx, chroma_my;
681  mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
682  my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
683  lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
684  ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
685  chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
686  chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
687  umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
688  umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
689  uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
690  uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
691  }else{
692  int cx, cy;
693  mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
694  my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
695  lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
696  ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
697  cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
698  cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
699  umx = cx >> 2;
700  umy = cy >> 2;
701  uvmx = (cx & 3) << 1;
702  uvmy = (cy & 3) << 1;
703  //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
704  if(uvmx == 6 && uvmy == 6)
705  uvmx = uvmy = 4;
706  }
707 
708  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
709  /* wait for the referenced mb row to be finished */
710  int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
711  ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
712  ff_thread_await_progress(f, mb_row, 0);
713  }
714 
715  dxy = ly*4 + lx;
716  srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
717  srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
718  srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
719  src_x = s->mb_x * 16 + xoff + mx;
720  src_y = s->mb_y * 16 + yoff + my;
721  uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
722  uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
723  srcY += src_y * s->linesize + src_x;
724  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
725  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
726  if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
727  (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
728  (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
729  srcY -= 2 + 2*s->linesize;
 730  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
 731  s->linesize, s->linesize,
732  (width << 3) + 6, (height << 3) + 6,
733  src_x - 2, src_y - 2,
734  s->h_edge_pos, s->v_edge_pos);
735  srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
736  emu = 1;
737  }
738  if(!weighted){
739  Y = s->dest[0] + xoff + yoff *s->linesize;
740  U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
741  V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
742  }else{
743  Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
744  U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
745  V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
746  }
747 
748  if(block_type == RV34_MB_P_16x8){
749  qpel_mc[1][dxy](Y, srcY, s->linesize);
750  Y += 8;
751  srcY += 8;
752  }else if(block_type == RV34_MB_P_8x16){
753  qpel_mc[1][dxy](Y, srcY, s->linesize);
754  Y += 8 * s->linesize;
755  srcY += 8 * s->linesize;
756  }
757  is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
758  qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
759  if (emu) {
760  uint8_t *uvbuf = s->sc.edge_emu_buffer;
761 
762  s->vdsp.emulated_edge_mc(uvbuf, srcU,
763  s->uvlinesize, s->uvlinesize,
764  (width << 2) + 1, (height << 2) + 1,
765  uvsrc_x, uvsrc_y,
766  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
767  srcU = uvbuf;
768  uvbuf += 9*s->uvlinesize;
769 
770  s->vdsp.emulated_edge_mc(uvbuf, srcV,
771  s->uvlinesize, s->uvlinesize,
772  (width << 2) + 1, (height << 2) + 1,
773  uvsrc_x, uvsrc_y,
774  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
775  srcV = uvbuf;
776  }
777  chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
778  chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
779 }
780 
781 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
782  const int xoff, const int yoff, int mv_off,
783  const int width, const int height, int dir)
784 {
785  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
786  r->rdsp.put_pixels_tab,
 787  r->rdsp.put_chroma_pixels_tab);
 788 }
789 
790 static void rv4_weight(RV34DecContext *r)
791 {
 792  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
 793  r->tmp_b_block_y[0],
794  r->tmp_b_block_y[1],
795  r->weight1,
796  r->weight2,
797  r->s.linesize);
 798  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
 799  r->tmp_b_block_uv[0],
800  r->tmp_b_block_uv[2],
801  r->weight1,
802  r->weight2,
803  r->s.uvlinesize);
 804  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
 805  r->tmp_b_block_uv[1],
806  r->tmp_b_block_uv[3],
807  r->weight1,
808  r->weight2,
809  r->s.uvlinesize);
810 }
811 
812 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
813 {
814  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
815 
816  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
817  r->rdsp.put_pixels_tab,
 818  r->rdsp.put_chroma_pixels_tab);
 819  if(!weighted){
820  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
821  r->rdsp.avg_pixels_tab,
 822  r->rdsp.avg_chroma_pixels_tab);
 823  }else{
824  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
825  r->rdsp.put_pixels_tab,
 826  r->rdsp.put_chroma_pixels_tab);
 827  rv4_weight(r);
828  }
829 }
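/* Explicitly bidirectional macroblocks (and all RV30 B blocks) average the two
 * predictions; direct macroblocks do the same when the frame sits exactly halfway
 * between its references (weight1 == 8192, i.e. 0.5 in Q14).  Otherwise both
 * predictions are rendered into the tmp_b_block_* scratch buffers and rv4_weight()
 * blends them in proportion to the temporal distances, with scaled_weight selecting
 * the reduced (>> 9) weights when both are multiples of 512. */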
830 
 831 static void rv34_mc_2mv_skip(RV34DecContext *r)
 832 {
833  int i, j;
834  int weighted = !r->rv30 && r->weight1 != 8192;
835 
836  for(j = 0; j < 2; j++)
837  for(i = 0; i < 2; i++){
838  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
839  weighted,
840  r->rdsp.put_pixels_tab,
 841  r->rdsp.put_chroma_pixels_tab);
 842  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
843  weighted,
844  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
 845  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
 846  }
847  if(weighted)
848  rv4_weight(r);
849 }
850 
851 /** number of motion vectors in each macroblock type */
852 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
853 
854 /**
855  * Decode motion vector differences
856  * and perform motion vector reconstruction and motion compensation.
857  */
858 static int rv34_decode_mv(RV34DecContext *r, int block_type)
859 {
860  MpegEncContext *s = &r->s;
861  GetBitContext *gb = &s->gb;
862  int i, j, k, l;
863  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
864  int next_bt;
865 
866  memset(r->dmv, 0, sizeof(r->dmv));
867  for(i = 0; i < num_mvs[block_type]; i++){
868  r->dmv[i][0] = get_interleaved_se_golomb(gb);
869  r->dmv[i][1] = get_interleaved_se_golomb(gb);
870  if (r->dmv[i][0] == INVALID_VLC ||
871  r->dmv[i][1] == INVALID_VLC) {
872  r->dmv[i][0] = r->dmv[i][1] = 0;
873  return AVERROR_INVALIDDATA;
874  }
875  }
876  switch(block_type){
877  case RV34_MB_TYPE_INTRA:
 878  case RV34_MB_TYPE_INTRA16x16:
 879  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
880  return 0;
881  case RV34_MB_SKIP:
882  if(s->pict_type == AV_PICTURE_TYPE_P){
883  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
884  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
885  break;
886  }
887  case RV34_MB_B_DIRECT:
888  //surprisingly, it uses motion scheme from next reference frame
889  /* wait for the current mb row to be finished */
890  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
 891  ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
 892 
893  next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
894  if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
895  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
896  ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
897  }else
898  for(j = 0; j < 2; j++)
899  for(i = 0; i < 2; i++)
900  for(k = 0; k < 2; k++)
901  for(l = 0; l < 2; l++)
902  s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
903  if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
904  rv34_mc_2mv(r, block_type);
905  else
906  rv34_mc_2mv_skip(r);
907  ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
908  break;
909  case RV34_MB_P_16x16:
910  case RV34_MB_P_MIX16x16:
911  rv34_pred_mv(r, block_type, 0, 0);
912  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
913  break;
914  case RV34_MB_B_FORWARD:
915  case RV34_MB_B_BACKWARD:
916  r->dmv[1][0] = r->dmv[0][0];
917  r->dmv[1][1] = r->dmv[0][1];
918  if(r->rv30)
919  rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
920  else
921  rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
922  rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
923  break;
924  case RV34_MB_P_16x8:
925  case RV34_MB_P_8x16:
926  rv34_pred_mv(r, block_type, 0, 0);
927  rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
928  if(block_type == RV34_MB_P_16x8){
929  rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
930  rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
931  }
932  if(block_type == RV34_MB_P_8x16){
933  rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
934  rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
935  }
936  break;
937  case RV34_MB_B_BIDIR:
938  rv34_pred_mv_b (r, block_type, 0);
939  rv34_pred_mv_b (r, block_type, 1);
940  rv34_mc_2mv (r, block_type);
941  break;
942  case RV34_MB_P_8x8:
943  for(i=0;i< 4;i++){
944  rv34_pred_mv(r, block_type, i, i);
945  rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
946  }
947  break;
948  }
949 
950  return 0;
951 }
952 /** @} */ // mv group
953 
954 /**
955  * @name Macroblock reconstruction functions
956  * @{
957  */
958 /** mapping of RV30/40 intra prediction types to standard H.264 types */
959 static const int ittrans[9] = {
962 };
963 
964 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
965 static const int ittrans16[4] = {
967 };
968 
969 /**
970  * Perform 4x4 intra prediction.
971  */
972 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
973 {
974  uint8_t *prev = dst - stride + 4;
975  uint32_t topleft;
976 
977  if(!up && !left)
978  itype = DC_128_PRED;
979  else if(!up){
980  if(itype == VERT_PRED) itype = HOR_PRED;
981  if(itype == DC_PRED) itype = LEFT_DC_PRED;
982  }else if(!left){
983  if(itype == HOR_PRED) itype = VERT_PRED;
984  if(itype == DC_PRED) itype = TOP_DC_PRED;
986  }
987  if(!down){
 988  if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
 989  if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
990  if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
991  }
992  if(!right && up){
993  topleft = dst[-stride + 3] * 0x01010101u;
994  prev = (uint8_t*)&topleft;
995  }
996  r->h.pred4x4[itype](dst, prev, stride);
997 }
998 
999 static inline int adjust_pred16(int itype, int up, int left)
1000 {
1001  if(!up && !left)
1002  itype = DC_128_PRED8x8;
1003  else if(!up){
1004  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1005  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1006  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1007  }else if(!left){
1008  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1009  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1010  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1011  }
1012  return itype;
1013 }
1014 
1015 static inline void rv34_process_block(RV34DecContext *r,
1016  uint8_t *pdst, int stride,
1017  int fc, int sc, int q_dc, int q_ac)
1018 {
1019  MpegEncContext *s = &r->s;
1020  int16_t *ptr = s->block[0];
1021  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1022  fc, sc, q_dc, q_ac, q_ac);
1023  if(has_ac){
1024  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1025  }else{
1026  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1027  ptr[0] = 0;
1028  }
1029 }
1030 
1031 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1032 {
1033  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1034  MpegEncContext *s = &r->s;
1035  GetBitContext *gb = &s->gb;
1036  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1037  q_ac = rv34_qscale_tab[s->qscale];
1038  uint8_t *dst = s->dest[0];
1039  int16_t *ptr = s->block[0];
1040  int i, j, itype, has_ac;
1041 
1042  memset(block16, 0, 16 * sizeof(*block16));
1043 
1044  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1045  if(has_ac)
1046  r->rdsp.rv34_inv_transform(block16);
1047  else
1048  r->rdsp.rv34_inv_transform_dc(block16);
1049 
1050  itype = ittrans16[intra_types[0]];
1051  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1052  r->h.pred16x16[itype](dst, s->linesize);
1053 
1054  for(j = 0; j < 4; j++){
1055  for(i = 0; i < 4; i++, cbp >>= 1){
1056  int dc = block16[i + j*4];
1057 
1058  if(cbp & 1){
1059  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1060  }else
1061  has_ac = 0;
1062 
1063  if(has_ac){
1064  ptr[0] = dc;
1065  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1066  }else
1067  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1068  }
1069 
1070  dst += 4*s->linesize;
1071  }
1072 
1073  itype = ittrans16[intra_types[0]];
1074  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1075  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1076 
1077  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1078  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1079 
1080  for(j = 1; j < 3; j++){
1081  dst = s->dest[j];
1082  r->h.pred8x8[itype](dst, s->uvlinesize);
1083  for(i = 0; i < 4; i++, cbp >>= 1){
1084  uint8_t *pdst;
1085  if(!(cbp & 1)) continue;
1086  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1087 
1088  rv34_process_block(r, pdst, s->uvlinesize,
1089  r->chroma_vlc, 1, q_dc, q_ac);
1090  }
1091  }
1092 }
1093 
1094 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1095 {
1096  MpegEncContext *s = &r->s;
1097  uint8_t *dst = s->dest[0];
1098  int avail[6*8] = {0};
1099  int i, j, k;
1100  int idx, q_ac, q_dc;
1101 
1102  // Set neighbour information.
1103  if(r->avail_cache[1])
1104  avail[0] = 1;
1105  if(r->avail_cache[2])
1106  avail[1] = avail[2] = 1;
1107  if(r->avail_cache[3])
1108  avail[3] = avail[4] = 1;
1109  if(r->avail_cache[4])
1110  avail[5] = 1;
1111  if(r->avail_cache[5])
1112  avail[8] = avail[16] = 1;
1113  if(r->avail_cache[9])
1114  avail[24] = avail[32] = 1;
1115 
1116  q_ac = rv34_qscale_tab[s->qscale];
1117  for(j = 0; j < 4; j++){
1118  idx = 9 + j*8;
1119  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1120  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1121  avail[idx] = 1;
1122  if(!(cbp & 1)) continue;
1123 
1124  rv34_process_block(r, dst, s->linesize,
1125  r->luma_vlc, 0, q_ac, q_ac);
1126  }
1127  dst += s->linesize * 4 - 4*4;
1128  intra_types += r->intra_types_stride;
1129  }
1130 
1131  intra_types -= r->intra_types_stride * 4;
1132 
1133  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1134  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1135 
1136  for(k = 0; k < 2; k++){
1137  dst = s->dest[1+k];
1138  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1139 
1140  for(j = 0; j < 2; j++){
1141  int* acache = r->avail_cache + 6 + j*4;
1142  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1143  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1144  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1145  acache[0] = 1;
1146 
1147  if(!(cbp&1)) continue;
1148 
1149  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1150  r->chroma_vlc, 1, q_dc, q_ac);
1151  }
1152 
1153  dst += 4*s->uvlinesize;
1154  }
1155  }
1156 }
1157 
1158 static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
1159 {
1160  int d;
1161  d = motion_val[0][0] - motion_val[-step][0];
1162  if(d < -3 || d > 3)
1163  return 1;
1164  d = motion_val[0][1] - motion_val[-step][1];
1165  if(d < -3 || d > 3)
1166  return 1;
1167  return 0;
1168 }
1169 
1170 static int rv34_set_deblock_coef(RV34DecContext *r)
1171 {
1172  MpegEncContext *s = &r->s;
1173  int hmvmask = 0, vmvmask = 0, i, j;
1174  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1175  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1176  for(j = 0; j < 16; j += 8){
1177  for(i = 0; i < 2; i++){
1178  if(is_mv_diff_gt_3(motion_val + i, 1))
1179  vmvmask |= 0x11 << (j + i*2);
1180  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1181  hmvmask |= 0x03 << (j + i*2);
1182  }
1183  motion_val += s->b8_stride;
1184  }
1185  if(s->first_slice_line)
1186  hmvmask &= ~0x000F;
1187  if(!s->mb_x)
1188  vmvmask &= ~0x1111;
1189  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1190  vmvmask |= (vmvmask & 0x4444) >> 1;
1191  hmvmask |= (hmvmask & 0x0F00) >> 4;
1192  if(s->mb_x)
1193  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1194  if(!s->first_slice_line)
1195  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1196  }
1197  return hmvmask | vmvmask;
1198 }
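/* The value returned above is a 16-bit mask with one bit per 4x4 luma block of the
 * macroblock (one row of four blocks per nibble).  vmvmask marks blocks whose motion
 * vector differs from the left neighbour by more than 3 in the stored sub-pel units
 * (vertical edges to filter); hmvmask does the same against the top neighbour
 * (horizontal edges).  The caller ORs this with cbp_luma, so an edge gets deblocked
 * when either side has coded coefficients or a large motion discontinuity. */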
1199 
1200 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1201 {
1202  MpegEncContext *s = &r->s;
1203  GetBitContext *gb = &s->gb;
1204  uint8_t *dst = s->dest[0];
1205  int16_t *ptr = s->block[0];
1206  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1207  int cbp, cbp2;
1208  int q_dc, q_ac, has_ac;
1209  int i, j;
1210  int dist;
1211 
1212  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1213  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1214  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1215  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1216  if(s->mb_x && dist)
1217  r->avail_cache[5] =
1218  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1219  if(dist >= s->mb_width)
1220  r->avail_cache[2] =
1221  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1222  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1223  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1224  if(s->mb_x && dist > s->mb_width)
1225  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1226 
1227  s->qscale = r->si.quant;
1228  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1229  r->cbp_luma [mb_pos] = cbp;
1230  r->cbp_chroma[mb_pos] = cbp >> 16;
1231  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1232  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1233 
1234  if(cbp == -1)
1235  return -1;
1236 
1237  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1238  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1239  else rv34_output_intra(r, intra_types, cbp);
1240  return 0;
1241  }
1242 
1243  if(r->is16){
1244  // Only for RV34_MB_P_MIX16x16
1245  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1246  memset(block16, 0, 16 * sizeof(*block16));
1247  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1248  q_ac = rv34_qscale_tab[s->qscale];
1249  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1250  r->rdsp.rv34_inv_transform(block16);
1251  else
1252  r->rdsp.rv34_inv_transform_dc(block16);
1253 
1254  q_ac = rv34_qscale_tab[s->qscale];
1255 
1256  for(j = 0; j < 4; j++){
1257  for(i = 0; i < 4; i++, cbp >>= 1){
1258  int dc = block16[i + j*4];
1259 
1260  if(cbp & 1){
1261  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1262  }else
1263  has_ac = 0;
1264 
1265  if(has_ac){
1266  ptr[0] = dc;
1267  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1268  }else
1269  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1270  }
1271 
1272  dst += 4*s->linesize;
1273  }
1274 
1275  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1276  }else{
1277  q_ac = rv34_qscale_tab[s->qscale];
1278 
1279  for(j = 0; j < 4; j++){
1280  for(i = 0; i < 4; i++, cbp >>= 1){
1281  if(!(cbp & 1)) continue;
1282 
1283  rv34_process_block(r, dst + 4*i, s->linesize,
1284  r->luma_vlc, 0, q_ac, q_ac);
1285  }
1286  dst += 4*s->linesize;
1287  }
1288  }
1289 
1290  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1291  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1292 
1293  for(j = 1; j < 3; j++){
1294  dst = s->dest[j];
1295  for(i = 0; i < 4; i++, cbp >>= 1){
1296  uint8_t *pdst;
1297  if(!(cbp & 1)) continue;
1298  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1299 
1300  rv34_process_block(r, pdst, s->uvlinesize,
1301  r->chroma_vlc, 1, q_dc, q_ac);
1302  }
1303  }
1304 
1305  return 0;
1306 }
1307 
1308 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1309 {
1310  MpegEncContext *s = &r->s;
1311  int cbp, dist;
1312  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1313 
1314  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1315  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1316  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1317  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1318  if(s->mb_x && dist)
1319  r->avail_cache[5] =
1320  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1321  if(dist >= s->mb_width)
1322  r->avail_cache[2] =
1323  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1324  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1325  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1326  if(s->mb_x && dist > s->mb_width)
1327  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1328 
1329  s->qscale = r->si.quant;
1330  cbp = rv34_decode_intra_mb_header(r, intra_types);
1331  r->cbp_luma [mb_pos] = cbp;
1332  r->cbp_chroma[mb_pos] = cbp >> 16;
1333  r->deblock_coefs[mb_pos] = 0xFFFF;
1334  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1335 
1336  if(cbp == -1)
1337  return -1;
1338 
1339  if(r->is16){
1340  rv34_output_i16x16(r, intra_types, cbp);
1341  return 0;
1342  }
1343 
1344  rv34_output_intra(r, intra_types, cbp);
1345  return 0;
1346 }
1347 
1348 static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
1349 {
1350  int bits;
1351  if(s->mb_y >= s->mb_height)
1352  return 1;
1353  if(!s->mb_num_left)
1354  return 1;
1355  if(r->s.mb_skip_run > 1)
1356  return 0;
1357  bits = get_bits_left(&s->gb);
1358  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1359  return 1;
1360  return 0;
1361 }
1362 
1363 
1364 static void rv34_decoder_free(RV34DecContext *r)
1365 {
1366  av_freep(&r->intra_types_hist);
1367  r->intra_types = NULL;
1368  av_freep(&r->tmp_b_block_base);
1369  av_freep(&r->mb_type);
1370  av_freep(&r->cbp_luma);
1371  av_freep(&r->cbp_chroma);
1372  av_freep(&r->deblock_coefs);
1373 }
1374 
1375 
1376 static int rv34_decoder_alloc(RV34DecContext *r)
1377 {
1378  r->intra_types_stride = r->s.mb_width * 4 + 4;
1379 
1380  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1381  sizeof(*r->cbp_chroma));
1382  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1383  sizeof(*r->cbp_luma));
1384  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1385  sizeof(*r->deblock_coefs));
1386  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1387  sizeof(*r->intra_types_hist));
1388  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1389  sizeof(*r->mb_type));
1390 
1391  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1392  r->intra_types_hist && r->mb_type)) {
1393  rv34_decoder_free(r);
1394  return AVERROR(ENOMEM);
1395  }
1396 
1397  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1398 
1399  return 0;
1400 }
1401 
1402 
1403 static int rv34_decoder_realloc(RV34DecContext *r)
1404 {
1405  rv34_decoder_free(r);
1406  return rv34_decoder_alloc(r);
1407 }
1408 
1409 
1410 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1411 {
1412  MpegEncContext *s = &r->s;
1413  GetBitContext *gb = &s->gb;
1414  int mb_pos, slice_type;
1415  int res;
1416 
1417  init_get_bits(&r->s.gb, buf, buf_size*8);
1418  res = r->parse_slice_header(r, gb, &r->si);
1419  if(res < 0){
1420  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1421  return -1;
1422  }
1423 
1424  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1425  if (slice_type != s->pict_type) {
1426  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1427  return AVERROR_INVALIDDATA;
1428  }
1429  if (s->width != r->si.width || s->height != r->si.height) {
1430  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1431  return AVERROR_INVALIDDATA;
1432  }
1433 
1434  r->si.end = end;
1435  s->qscale = r->si.quant;
1436  s->mb_num_left = r->si.end - r->si.start;
1437  r->s.mb_skip_run = 0;
1438 
1439  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1440  if(r->si.start != mb_pos){
1441  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1442  s->mb_x = r->si.start % s->mb_width;
1443  s->mb_y = r->si.start / s->mb_width;
1444  }
1445  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1446  s->first_slice_line = 1;
1447  s->resync_mb_x = s->mb_x;
1448  s->resync_mb_y = s->mb_y;
1449 
1450  ff_init_block_index(s);
1451  while(!check_slice_end(r, s)) {
1452  ff_update_block_index(s);
1453 
1454  if(r->si.type)
1455  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1456  else
1457  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1458  if(res < 0){
1459  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1460  return -1;
1461  }
1462  if (++s->mb_x == s->mb_width) {
1463  s->mb_x = 0;
1464  s->mb_y++;
1465  ff_init_block_index(s);
1466 
1467  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1468  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1469 
1470  if(r->loop_filter && s->mb_y >= 2)
1471  r->loop_filter(r, s->mb_y - 2);
1472 
1473  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1474  ff_thread_report_progress(&s->current_picture_ptr->tf,
1475  s->mb_y - 2, 0);
1476 
1477  }
1478  if(s->mb_x == s->resync_mb_x)
1479  s->first_slice_line=0;
1480  s->mb_num_left--;
1481  }
1482  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1483 
1484  return s->mb_y == s->mb_height;
1485 }
1486 
1487 /** @} */ // reconstruction group end
1488 
1489 /**
1490  * Initialize decoder.
1491  */
1492 av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
1493 {
1494  RV34DecContext *r = avctx->priv_data;
1495  MpegEncContext *s = &r->s;
1496  int ret;
1497 
1498  ff_mpv_decode_defaults(s);
1499  ff_mpv_decode_init(s, avctx);
1500  s->out_format = FMT_H263;
1501 
1502  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1503  avctx->has_b_frames = 1;
1504  s->low_delay = 0;
1505 
1506  ff_mpv_idct_init(s);
1507  if ((ret = ff_mpv_common_init(s)) < 0)
1508  return ret;
1509 
1510  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1511 
1512 #if CONFIG_RV30_DECODER
1513  if (avctx->codec_id == AV_CODEC_ID_RV30)
1514  ff_rv30dsp_init(&r->rdsp);
1515 #endif
1516 #if CONFIG_RV40_DECODER
1517  if (avctx->codec_id == AV_CODEC_ID_RV40)
1518  ff_rv40dsp_init(&r->rdsp);
1519 #endif
1520 
1521  if ((ret = rv34_decoder_alloc(r)) < 0) {
1522  ff_mpv_common_end(&r->s);
1523  return ret;
1524  }
1525 
1526  if(!intra_vlcs[0].cbppattern[0].bits)
1527  rv34_init_tables();
1528 
1529  avctx->internal->allocate_progress = 1;
1530 
1531  return 0;
1532 }
1533 
1534 int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
1535 {
1536  int err;
1537  RV34DecContext *r = avctx->priv_data;
1538 
1539  r->s.avctx = avctx;
1540 
1541  if (avctx->internal->is_copy) {
1542  r->tmp_b_block_base = NULL;
1543  r->cbp_chroma = NULL;
1544  r->cbp_luma = NULL;
1545  r->deblock_coefs = NULL;
1546  r->intra_types_hist = NULL;
1547  r->mb_type = NULL;
1548 
1549  ff_mpv_idct_init(&r->s);
1550 
1551  if ((err = ff_mpv_common_init(&r->s)) < 0)
1552  return err;
1553  if ((err = rv34_decoder_alloc(r)) < 0) {
1554  ff_mpv_common_end(&r->s);
1555  return err;
1556  }
1557  }
1558 
1559  return 0;
1560 }
1561 
1562 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1563 {
1564  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1565  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1566  int err;
1567 
1568  if (dst == src || !s1->context_initialized)
1569  return 0;
1570 
1571  if (s->height != s1->height || s->width != s1->width) {
1572  s->height = s1->height;
1573  s->width = s1->width;
1574  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1575  return err;
1576  if ((err = rv34_decoder_realloc(r)) < 0)
1577  return err;
1578  }
1579 
1580  r->cur_pts = r1->cur_pts;
1581  r->last_pts = r1->last_pts;
1582  r->next_pts = r1->next_pts;
1583 
1584  memset(&r->si, 0, sizeof(r->si));
1585 
1586  // Do not call ff_mpeg_update_thread_context on a partially initialized
1587  // decoder context.
1588  if (!s1->context_initialized)
1589  return 0;
1590 
1591  return ff_mpeg_update_thread_context(dst, src);
1592 }
1593 
1594 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1595 {
1596  if (n < slice_count) {
1597  if(avctx->slice_count) return avctx->slice_offset[n];
1598  else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1599  } else
1600  return buf_size;
1601 }
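/* The offsets come either from the container (avctx->slice_offset, when the demuxer
 * has already split the slices) or from the table that precedes the payload in the
 * packet: one byte holding the slice count minus one, then an 8-byte entry per slice
 * whose second dword is the offset.  The AV_RL32(...) == 1 test on the preceding
 * dword is a heuristic to guess whether those offsets were written little- or
 * big-endian. */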
1602 
1603 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1604 {
1605  RV34DecContext *r = avctx->priv_data;
1606  MpegEncContext *s = &r->s;
1607  int got_picture = 0, ret;
1608 
1609  ff_er_frame_end(&s->er);
1610  ff_mpv_frame_end(s);
1611  s->mb_num_left = 0;
1612 
1613  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1614  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1615 
1616  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1617  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1618  return ret;
1621  got_picture = 1;
1622  } else if (s->last_picture_ptr) {
1623  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1624  return ret;
1627  got_picture = 1;
1628  }
1629 
1630  return got_picture;
1631 }
1632 
1633 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1634 {
1635  // attempt to keep aspect during typical resolution switches
1636  if (!sar.num)
1637  sar = (AVRational){1, 1};
1638 
1639  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1640  return sar;
1641 }
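/* Example of the adjustment above: a 320x240 stream with square pixels (SAR 1:1)
 * that switches to 640x240 gets SAR 1 * (240/640) * (320/240) = 1:2, so the display
 * aspect ratio stays at 4:3 across the resolution change. */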
1642 
1643 int ff_rv34_decode_frame(AVCodecContext *avctx,
1644  void *data, int *got_picture_ptr,
1645  AVPacket *avpkt)
1646 {
1647  const uint8_t *buf = avpkt->data;
1648  int buf_size = avpkt->size;
1649  RV34DecContext *r = avctx->priv_data;
1650  MpegEncContext *s = &r->s;
1651  AVFrame *pict = data;
1652  SliceInfo si;
1653  int i, ret;
1654  int slice_count;
1655  const uint8_t *slices_hdr = NULL;
1656  int last = 0;
1657  int faulty_b = 0;
1658  int offset;
1659 
1660  /* no supplementary picture */
1661  if (buf_size == 0) {
1662  /* special case for last picture */
1663  if (s->low_delay==0 && s->next_picture_ptr) {
1664  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1665  return ret;
1666  s->next_picture_ptr = NULL;
1667 
1668  *got_picture_ptr = 1;
1669  }
1670  return 0;
1671  }
1672 
1673  if(!avctx->slice_count){
1674  slice_count = (*buf++) + 1;
1675  slices_hdr = buf + 4;
1676  buf += 8 * slice_count;
1677  buf_size -= 1 + 8 * slice_count;
1678  }else
1679  slice_count = avctx->slice_count;
1680 
1681  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1682  //parse first slice header to check whether this frame can be decoded
1683  if(offset < 0 || offset > buf_size){
1684  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1685  return AVERROR_INVALIDDATA;
1686  }
1687  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1688  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1689  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1690  return AVERROR_INVALIDDATA;
1691  }
1692  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1693  si.type == AV_PICTURE_TYPE_B) {
1694  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1695  "reference data.\n");
1696  faulty_b = 1;
1697  }
1698  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1699  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1700  || avctx->skip_frame >= AVDISCARD_ALL)
1701  return avpkt->size;
1702 
1703  /* first slice */
1704  if (si.start == 0) {
1705  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1706  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1707  s->mb_num_left);
1708  ff_er_frame_end(&s->er);
1709  ff_mpv_frame_end(s);
1710  }
1711 
1712  if (s->width != si.width || s->height != si.height) {
1713  int err;
1714 
1715  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1716  si.width, si.height);
1717 
1718  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1719  return AVERROR_INVALIDDATA;
1720 
1721  s->avctx->sample_aspect_ratio = update_sar(
1722  s->width, s->height, s->avctx->sample_aspect_ratio,
1723  si.width, si.height);
1724  s->width = si.width;
1725  s->height = si.height;
1726 
1727  err = ff_set_dimensions(s->avctx, s->width, s->height);
1728  if (err < 0)
1729  return err;
1730 
1731  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1732  return err;
1733  if ((err = rv34_decoder_realloc(r)) < 0)
1734  return err;
1735  }
1736  if (faulty_b)
1737  return AVERROR_INVALIDDATA;
1738  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1739  if (ff_mpv_frame_start(s, s->avctx) < 0)
1740  return -1;
1741  ff_mpeg_er_frame_start(s);
1742  if (!r->tmp_b_block_base) {
1743  int i;
1744 
1745  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1746  for (i = 0; i < 2; i++)
1747  r->tmp_b_block_y[i] = r->tmp_b_block_base
1748  + i * 16 * s->linesize;
1749  for (i = 0; i < 4; i++)
1750  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1751  + (i >> 1) * 8 * s->uvlinesize
1752  + (i & 1) * 16;
1753  }
1754  r->cur_pts = si.pts;
1755  if (s->pict_type != AV_PICTURE_TYPE_B) {
1756  r->last_pts = r->next_pts;
1757  r->next_pts = r->cur_pts;
1758  } else {
1759  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1760  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1761  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1762 
1763  if(!refdist){
1764  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1765  r->scaled_weight = 0;
1766  }else{
1767  if (FFMAX(dist0, dist1) > refdist)
1768  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1769 
1770  r->mv_weight1 = (dist0 << 14) / refdist;
1771  r->mv_weight2 = (dist1 << 14) / refdist;
1772  if((r->mv_weight1|r->mv_weight2) & 511){
1773  r->weight1 = r->mv_weight1;
1774  r->weight2 = r->mv_weight2;
1775  r->scaled_weight = 0;
1776  }else{
1777  r->weight1 = r->mv_weight1 >> 9;
1778  r->weight2 = r->mv_weight2 >> 9;
1779  r->scaled_weight = 1;
1780  }
1781  }
1782  }
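 /* The weights computed above are fixed-point fractions of the distance
  * between the two reference frames, scaled by 1 << 14: for example, a
  * B-frame sitting exactly midway (dist0 == dist1 == refdist / 2) gets
  * mv_weight1 == mv_weight2 == 8192, i.e. 0.5. If both weights are
  * multiples of 512 they are pre-shifted down by 9 bits and
  * scaled_weight is set, so the biweight DSP functions can use the
  * reduced-precision values directly. */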
 1783  s->mb_x = s->mb_y = 0;
 1784  ff_thread_finish_setup(s->avctx);
 1785  } else if (HAVE_THREADS &&
 1786  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1787  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1788  "multithreading mode (start MB is %d).\n", si.start);
1789  return AVERROR_INVALIDDATA;
1790  }
1791 
1792  for(i = 0; i < slice_count; i++){
1793  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1794  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1795  int size;
1796 
1797  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1798  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1799  break;
1800  }
1801  size = offset1 - offset;
1802 
1803  r->si.end = s->mb_width * s->mb_height;
1804  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1805 
1806  if(i+1 < slice_count){
1807  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1808  if (offset2 < offset1 || offset2 > buf_size) {
1809  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1810  break;
1811  }
1812  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1813  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1814  size = offset2 - offset;
1815  }else
1816  r->si.end = si.start;
1817  }
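 /* The end of the current slice is found by peeking at the next slice
  * header: if that header parses, its starting macroblock becomes this
  * slice's end; if it does not, the next chunk is treated as a
  * continuation and size is extended so both chunks are fed to
  * rv34_decode_slice() together. */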
1818  av_assert0 (size >= 0 && size <= buf_size - offset);
1819  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1820  if(last)
1821  break;
1822  }
1823 
1824  if (s->current_picture_ptr) {
1825  if (last) {
1826  if(r->loop_filter)
1827  r->loop_filter(r, s->mb_height - 1);
1828 
1829  ret = finish_frame(avctx, pict);
1830  if (ret < 0)
1831  return ret;
1832  *got_picture_ptr = ret;
 1833  } else if (HAVE_THREADS &&
 1834  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
 1835  av_log(avctx, AV_LOG_INFO, "marking unfinished frame as finished\n");
1836  /* always mark the current frame as finished, frame-mt supports
1837  * only complete frames */
1838  ff_er_frame_end(&s->er);
1839  ff_mpv_frame_end(s);
 1840  s->mb_num_left = 0;
 1841  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1842  return AVERROR_INVALIDDATA;
1843  }
1844  }
1845 
1846  return avpkt->size;
1847 }
 1848 
 1849  av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
1850 {
1851  RV34DecContext *r = avctx->priv_data;
1852 
1853  ff_mpv_common_end(&r->s);
1854  rv34_decoder_free(r);
1855 
1856  return 0;
1857 }