vp56.c
1 /*
2  * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * VP5 and VP6 compatible video decoder (common features)
24  */
25 
26 #include "avcodec.h"
27 #include "bytestream.h"
28 #include "internal.h"
29 #include "h264chroma.h"
30 #include "vp56.h"
31 #include "vp56data.h"
32 
33 
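/* Note: ff_vp56_init_dequant() below refreshes the VP3-style loop-filter
 * bounding values only when the quantizer actually changes, and stores the
 * DC/AC dequantization factors pre-scaled by 4 (<< 2), presumably to match
 * the scale expected by the shared VP3 IDCT. */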
34 void ff_vp56_init_dequant(VP56Context *s, int quantizer)
35 {
36  if (s->quantizer != quantizer)
37  ff_vp3dsp_set_bounding_values(s->bounding_values_array, ff_vp56_filter_threshold[quantizer]);
38  s->quantizer = quantizer;
39  s->dequant_dc = ff_vp56_dc_dequant[quantizer] << 2;
40  s->dequant_ac = ff_vp56_ac_dequant[quantizer] << 2;
41 }
42 
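/* vp56_get_vectors_predictors() scans the 12 candidate macroblock positions
 * around (row, col), keeping up to two distinct non-zero vectors whose
 * macroblocks used the same reference frame; the candidates end up in
 * s->vector_candidate[] and the return value (0..2) is the prediction
 * context later fed to vp56_parse_mb_type(). */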
43 static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
44  VP56Frame ref_frame)
45 {
46  int nb_pred = 0;
47  VP56mv vect[2] = {{0,0}, {0,0}};
48  int pos, offset;
49  VP56mv mvp;
50 
51  for (pos=0; pos<12; pos++) {
52  mvp.x = col + ff_vp56_candidate_predictor_pos[pos][0];
53  mvp.y = row + ff_vp56_candidate_predictor_pos[pos][1];
54  if (mvp.x < 0 || mvp.x >= s->mb_width ||
55  mvp.y < 0 || mvp.y >= s->mb_height)
56  continue;
57  offset = mvp.x + s->mb_width*mvp.y;
58 
59  if (ff_vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
60  continue;
61  if ((s->macroblocks[offset].mv.x == vect[0].x &&
62  s->macroblocks[offset].mv.y == vect[0].y) ||
63  (s->macroblocks[offset].mv.x == 0 &&
64  s->macroblocks[offset].mv.y == 0))
65  continue;
66 
67  vect[nb_pred++] = s->macroblocks[offset].mv;
68  if (nb_pred > 1) {
69  nb_pred = -1;
70  break;
71  }
72  s->vector_candidate_pos = pos;
73  }
74 
75  s->vector_candidate[0] = vect[0];
76  s->vector_candidate[1] = vect[1];
77 
78  return nb_pred+1;
79 }
80 
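/* vp56_parse_mb_type_models() updates the macroblock-type statistics for the
 * three prediction contexts: the range coder may signal a reload from one of
 * the predefined tables (ff_vp56_pre_def_mb_type_stats) and/or signed delta
 * adjustments, and the stats are then turned into the binary-tree
 * probabilities consumed by vp56_parse_mb_type(). */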
81 static void vp56_parse_mb_type_models(VP56Context *s)
82 {
83  VP56RangeCoder *c = &s->c;
84  VP56Model *model = s->modelp;
85  int i, ctx, type;
86 
87  for (ctx=0; ctx<3; ctx++) {
88  if (vp56_rac_get_prob_branchy(c, 174)) {
89  int idx = vp56_rac_gets(c, 4);
 90  memcpy(model->mb_types_stats[ctx],
 91  ff_vp56_pre_def_mb_type_stats[idx][ctx],
 92  sizeof(model->mb_types_stats[ctx]));
93  }
94  if (vp56_rac_get_prob_branchy(c, 254)) {
95  for (type=0; type<10; type++) {
96  for(i=0; i<2; i++) {
97  if (vp56_rac_get_prob_branchy(c, 205)) {
98  int delta, sign = vp56_rac_get(c);
 99 
100  delta = vp56_rac_get_tree(c, ff_vp56_pmbtm_tree,
101  ff_vp56_mb_type_model_model);
102  if (!delta)
103  delta = 4 * vp56_rac_gets(c, 7);
104  model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
105  }
106  }
107  }
108  }
109  }
110 
111  /* compute MB type probability tables based on previous MB type */
112  for (ctx=0; ctx<3; ctx++) {
113  int p[10];
114 
115  for (type=0; type<10; type++)
116  p[type] = 100 * model->mb_types_stats[ctx][type][1];
117 
118  for (type=0; type<10; type++) {
119  int p02, p34, p0234, p17, p56, p89, p5689, p156789;
120 
121  /* conservative MB type probability */
122  model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);
123 
124  p[type] = 0; /* same MB type => weight is null */
125 
126  /* binary tree parsing probabilities */
127  p02 = p[0] + p[2];
128  p34 = p[3] + p[4];
129  p0234 = p02 + p34;
130  p17 = p[1] + p[7];
131  p56 = p[5] + p[6];
132  p89 = p[8] + p[9];
133  p5689 = p56 + p89;
134  p156789 = p17 + p5689;
135 
136  model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
137  model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
138  model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
139  model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
140  model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
141  model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
142  model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
143  model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
144  model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
145 
146  /* restore initial value */
147  p[type] = 100 * model->mb_types_stats[ctx][type][1];
148  }
149  }
150 }
151 
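/* With probability mb_type_model[0] the previous macroblock type is simply
 * reused; otherwise the new type is read through ff_vp56_pmbt_tree using the
 * per-type probabilities computed in vp56_parse_mb_type_models(). */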
152 static VP56mb vp56_parse_mb_type(VP56Context *s,
153  VP56mb prev_type, int ctx)
154 {
155  uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
156  VP56RangeCoder *c = &s->c;
157 
158  if (vp56_rac_get_prob_branchy(c, mb_type_model[0]))
159  return prev_type;
160  else
161  return vp56_rac_get_tree(c, ff_vp56_pmbt_tree, mb_type_model);
162 }
163 
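/* 4MV mode: each luma block carries its own type and vector. The vector of
 * the last luma block becomes the macroblock vector used for prediction, and
 * both chroma blocks get the rounded average of the four luma vectors.
 * As a hypothetical example, luma x components of 3, 1, 2 and 2 sum to 8,
 * so RSHIFT(8, 2) = 2 is used for s->mv[4].x and s->mv[5].x. */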
164 static void vp56_decode_4mv(VP56Context *s, int row, int col)
165 {
166  VP56mv mv = {0,0};
167  int type[4];
168  int b;
169 
170  /* parse each block type */
171  for (b=0; b<4; b++) {
172  type[b] = vp56_rac_gets(&s->c, 2);
173  if (type[b])
174  type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
175  }
176 
177  /* get vectors */
178  for (b=0; b<4; b++) {
179  switch (type[b]) {
180  case VP56_MB_INTER_NOVEC_PF:
181  s->mv[b] = (VP56mv) {0,0};
182  break;
183  case VP56_MB_INTER_DELTA_PF:
184  s->parse_vector_adjustment(s, &s->mv[b]);
185  break;
186  case VP56_MB_INTER_V1_PF:
187  s->mv[b] = s->vector_candidate[0];
188  break;
189  case VP56_MB_INTER_V2_PF:
190  s->mv[b] = s->vector_candidate[1];
191  break;
192  }
193  mv.x += s->mv[b].x;
194  mv.y += s->mv[b].y;
195  }
196 
197  /* this is the one selected for the whole MB for prediction */
198  s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
199 
200  /* chroma vectors are average luma vectors */
201  s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
202  s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
203 }
204 
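/* vp56_decode_mv() selects the macroblock type from the predictor context,
 * then either reuses one of the candidate vectors, adds a decoded adjustment
 * on top of the prediction (the DELTA types), falls back to the null vector,
 * or defers to vp56_decode_4mv(). Outside 4MV mode the chosen vector is
 * copied to all six blocks of the macroblock. */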
205 static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
206 {
207  VP56mv *mv, vect = {0,0};
208  int ctx, b;
209 
210  ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
211  s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
212  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
213 
214  switch (s->mb_type) {
215  case VP56_MB_INTER_V1_PF:
216  mv = &s->vector_candidate[0];
217  break;
218 
219  case VP56_MB_INTER_V2_PF:
220  mv = &s->vector_candidate[1];
221  break;
222 
223  case VP56_MB_INTER_V1_GF:
224  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
225  mv = &s->vector_candidate[0];
226  break;
227 
228  case VP56_MB_INTER_V2_GF:
229  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
230  mv = &s->vector_candidate[1];
231  break;
232 
233  case VP56_MB_INTER_DELTA_PF:
234  s->parse_vector_adjustment(s, &vect);
235  mv = &vect;
236  break;
237 
238  case VP56_MB_INTER_DELTA_GF:
239  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
240  s->parse_vector_adjustment(s, &vect);
241  mv = &vect;
242  break;
243 
244  case VP56_MB_INTER_4V:
245  vp56_decode_4mv(s, row, col);
246  return s->mb_type;
247 
248  default:
249  mv = &vect;
250  break;
251  }
252 
253  s->macroblocks[row*s->mb_width + col].mv = *mv;
254 
255  /* same vector for all blocks */
256  for (b=0; b<6; b++)
257  s->mv[b] = *mv;
258 
259  return s->mb_type;
260 }
261 
262 static VP56mb vp56_conceal_mv(VP56Context *s, int row, int col)
263 {
264  VP56mv *mv, vect = {0,0};
265  int b;
266 
267  s->mb_type = VP56_MB_INTER_NOVEC_PF;
268  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
269 
270  mv = &vect;
271 
272  s->macroblocks[row*s->mb_width + col].mv = *mv;
273 
274  /* same vector for all blocks */
275  for (b=0; b<6; b++)
276  s->mv[b] = *mv;
277 
278  return s->mb_type;
279 }
280 
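/* DC prediction: for each block the DC coefficients of the left and above
 * neighbours coded from the same reference frame are averaged (VP5 may also
 * consider the above-left and above-right blocks), with a fallback to the
 * last DC value seen for that plane and reference. The prediction is added
 * to the decoded DC before dequantization. */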
281 static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
282 {
283  int idx = s->idct_scantable[0];
284  int b;
285 
286  for (b=0; b<6; b++) {
287  VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
288  VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
289  int count = 0;
290  int dc = 0;
291  int i;
292 
293  if (ref_frame == lb->ref_frame) {
294  dc += lb->dc_coeff;
295  count++;
296  }
297  if (ref_frame == ab->ref_frame) {
298  dc += ab->dc_coeff;
299  count++;
300  }
301  if (s->avctx->codec->id == AV_CODEC_ID_VP5)
302  for (i=0; i<2; i++)
303  if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
304  dc += ab[-1+2*i].dc_coeff;
305  count++;
306  }
307  if (count == 0)
308  dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
309  else if (count == 2)
310  dc /= 2;
311 
312  s->block_coeff[b][idx] += dc;
313  s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
314  ab->dc_coeff = s->block_coeff[b][idx];
315  ab->ref_frame = ref_frame;
316  lb->dc_coeff = s->block_coeff[b][idx];
317  lb->ref_frame = ref_frame;
318  s->block_coeff[b][idx] *= s->dequant_dc;
319  }
320 }
321 
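/* Deblocking of the reference block before motion compensation: VP5 uses its
 * own horizontal/vertical edge filters with a quantizer-dependent threshold,
 * while the VP6 path reuses the VP3 loop filter with the bounding values
 * prepared in ff_vp56_init_dequant(). */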
322 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
323  ptrdiff_t stride, int dx, int dy)
324 {
325  if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
326  int t = ff_vp56_filter_threshold[s->quantizer];
327  if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
328  if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
329  } else {
330  int * bounding_values = s->bounding_values_array + 127;
331  if (dx)
332  ff_vp3dsp_h_loop_filter_12(yuv + 10-dx, stride, bounding_values);
333  if (dy)
334  ff_vp3dsp_v_loop_filter_12(yuv + stride*(10-dy), stride, bounding_values);
335  }
336 }
337 
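/* Motion compensation for one 8x8 block. A 12x12 source area is required by
 * the fractional-pel filtering; if it would cross the plane borders it is
 * rebuilt with emulated_edge_mc(), and when deblocking is active the area is
 * copied first so it can be filtered in place. Fractional positions are then
 * handled by the codec-specific s->filter callback when one is installed,
 * or by the VP3-style no-rounding half-pel average otherwise. */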
338 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
339  ptrdiff_t stride, int x, int y)
340 {
341  uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
342  uint8_t *src_block;
343  int src_offset;
344  int overlap_offset = 0;
345  int mask = s->vp56_coord_div[b] - 1;
346  int deblock_filtering = s->deblock_filtering;
347  int dx;
348  int dy;
349 
350  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
351  (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
352  && !s->frames[VP56_FRAME_CURRENT]->key_frame))
353  deblock_filtering = 0;
354 
355  dx = s->mv[b].x / s->vp56_coord_div[b];
356  dy = s->mv[b].y / s->vp56_coord_div[b];
357 
358  if (b >= 4) {
359  x /= 2;
360  y /= 2;
361  }
362  x += dx - 2;
363  y += dy - 2;
364 
365  if (x<0 || x+12>=s->plane_width[plane] ||
366  y<0 || y+12>=s->plane_height[plane]) {
367  s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
368  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
369  stride, stride,
370  12, 12, x, y,
371  s->plane_width[plane],
372  s->plane_height[plane]);
373  src_block = s->edge_emu_buffer;
374  src_offset = 2 + 2*stride;
375  } else if (deblock_filtering) {
376  /* only need a 12x12 block, but there is no such dsp function, */
377  /* so copy a 16x12 block */
378  s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
379  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
380  stride, 12);
381  src_block = s->edge_emu_buffer;
382  src_offset = 2 + 2*stride;
383  } else {
384  src_block = src;
385  src_offset = s->block_offset[b] + dy*stride + dx;
386  }
387 
388  if (deblock_filtering)
389  vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
390 
391  if (s->mv[b].x & mask)
392  overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
393  if (s->mv[b].y & mask)
394  overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
395 
396  if (overlap_offset) {
397  if (s->filter)
398  s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
399  stride, s->mv[b], mask, s->filter_selection, b<4);
400  else
401  s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
402  src_block+src_offset+overlap_offset,
403  stride, 8);
404  } else {
405  s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
406  }
407 }
408 
409 static void vp56_idct_put(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
410 {
411  if (selector > 10 || selector == 1)
412  s->vp3dsp.idct_put(dest, stride, block);
413  else
414  ff_vp3dsp_idct10_put(dest, stride, block);
415 }
416 
417 static void vp56_idct_add(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
418 {
419  if (selector > 10)
420  s->vp3dsp.idct_add(dest, stride, block);
421  else if (selector > 1)
422  ff_vp3dsp_idct10_add(dest, stride, block);
423  else
424  s->vp3dsp.idct_dc_add(dest, stride, block);
425 }
426 
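/* Macroblock reconstruction: intra blocks use a full idct_put(); no-vector
 * inter blocks copy the co-located reference block and add the residual;
 * every other inter type runs motion compensation before adding the
 * residual. For the alpha plane only four blocks per macroblock exist, and
 * the chroma DC coefficients are cleared afterwards. */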
427 static av_always_inline void vp56_render_mb(VP56Context *s, int row, int col, int is_alpha, VP56mb mb_type)
428 {
429  int b, ab, b_max, plane, off;
430  AVFrame *frame_current, *frame_ref;
431  VP56Frame ref_frame = ff_vp56_reference_frame[mb_type];
432 
433  vp56_add_predictors_dc(s, ref_frame);
434 
435  frame_current = s->frames[VP56_FRAME_CURRENT];
436  frame_ref = s->frames[ref_frame];
437  if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
438  return;
439 
440  ab = 6*is_alpha;
441  b_max = 6 - 2*is_alpha;
442 
443  switch (mb_type) {
444  case VP56_MB_INTRA:
445  for (b=0; b<b_max; b++) {
446  plane = ff_vp56_b2p[b+ab];
447  vp56_idct_put(s, frame_current->data[plane] + s->block_offset[b],
448  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
449  }
450  break;
451 
452  case VP56_MB_INTER_NOVEC_PF:
453  case VP56_MB_INTER_NOVEC_GF:
454  for (b=0; b<b_max; b++) {
455  plane = ff_vp56_b2p[b+ab];
456  off = s->block_offset[b];
457  s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
458  frame_ref->data[plane] + off,
459  s->stride[plane], 8);
460  vp56_idct_add(s, frame_current->data[plane] + off,
461  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
462  }
463  break;
464 
465  case VP56_MB_INTER_DELTA_PF:
466  case VP56_MB_INTER_V1_PF:
467  case VP56_MB_INTER_V2_PF:
468  case VP56_MB_INTER_DELTA_GF:
469  case VP56_MB_INTER_4V:
470  case VP56_MB_INTER_V1_GF:
471  case VP56_MB_INTER_V2_GF:
472  for (b=0; b<b_max; b++) {
473  int x_off = b==1 || b==3 ? 8 : 0;
474  int y_off = b==2 || b==3 ? 8 : 0;
475  plane = ff_vp56_b2p[b+ab];
476  vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
477  16*col+x_off, 16*row+y_off);
478  vp56_idct_add(s, frame_current->data[plane] + s->block_offset[b],
479  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
480  }
481  break;
482  }
483 
484  if (is_alpha) {
485  s->block_coeff[4][0] = 0;
486  s->block_coeff[5][0] = 0;
487  }
488 }
489 
490 static int vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
491 {
492  VP56mb mb_type;
493  int ret;
494 
495  if (s->frames[VP56_FRAME_CURRENT]->key_frame)
496  mb_type = VP56_MB_INTRA;
497  else
498  mb_type = vp56_decode_mv(s, row, col);
499 
500  ret = s->parse_coeff(s);
501  if (ret < 0)
502  return ret;
503 
504  vp56_render_mb(s, row, col, is_alpha, mb_type);
505 
506  return 0;
507 }
508 
509 static int vp56_conceal_mb(VP56Context *s, int row, int col, int is_alpha)
510 {
511  VP56mb mb_type;
512 
513  if (s->frames[VP56_FRAME_CURRENT]->key_frame)
514  mb_type = VP56_MB_INTRA;
515  else
516  mb_type = vp56_conceal_mv(s, row, col);
517 
518  vp56_render_mb(s, row, col, is_alpha, mb_type);
519 
520  return 0;
521 }
522 
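/* vp56_size_changed() recomputes the plane and macroblock dimensions after a
 * coded size change, rejects unreasonably large pictures, and reallocates
 * the above-block context, the macroblock array and the edge emulation
 * buffer; the alpha context, if present, is resized the same way. */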
523 static int vp56_size_changed(VP56Context *s)
524 {
525  AVCodecContext *avctx = s->avctx;
526  int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0];
527  int i;
528 
529  s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
530  s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
531  s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
532  s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
533 
534  s->have_undamaged_frame = 0;
535 
536  for (i=0; i<4; i++)
537  s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
538 
539  s->mb_width = (avctx->coded_width +15) / 16;
540  s->mb_height = (avctx->coded_height+15) / 16;
541 
542  if (s->mb_width > 1000 || s->mb_height > 1000) {
543  ff_set_dimensions(avctx, 0, 0);
544  av_log(avctx, AV_LOG_ERROR, "picture too big\n");
545  return AVERROR_INVALIDDATA;
546  }
547 
548  av_reallocp_array(&s->above_blocks, 4*s->mb_width+6,
549  sizeof(*s->above_blocks));
550  av_reallocp_array(&s->macroblocks, s->mb_width*s->mb_height,
551  sizeof(*s->macroblocks));
552  av_free(s->edge_emu_buffer_alloc);
553  s->edge_emu_buffer_alloc = av_malloc(16*stride);
554  s->edge_emu_buffer = s->edge_emu_buffer_alloc;
555  if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc)
556  return AVERROR(ENOMEM);
557  if (s->flip < 0)
558  s->edge_emu_buffer += 15 * stride;
559 
560  if (s->alpha_context)
561  return vp56_size_changed(s->alpha_context);
562 
563  return 0;
564 }
565 
566 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);
567 
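/* Top-level frame decoding: when alpha is present a 24-bit offset to the
 * alpha sub-stream precedes the data. The header(s) are parsed, a size
 * change drops every reference frame, a new buffer is obtained with
 * ff_get_buffer(), and the YUV plane(s) plus the optional alpha plane are
 * then decoded as one or two execute2() jobs. */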
568 int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
569  AVPacket *avpkt)
570 {
571  const uint8_t *buf = avpkt->data;
572  VP56Context *s = avctx->priv_data;
573  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
574  int remaining_buf_size = avpkt->size;
575  int alpha_offset = remaining_buf_size;
576  int i, res;
577  int ret;
578 
579  if (s->has_alpha) {
580  if (remaining_buf_size < 3)
581  return AVERROR_INVALIDDATA;
582  alpha_offset = bytestream_get_be24(&buf);
583  remaining_buf_size -= 3;
584  if (remaining_buf_size < alpha_offset)
585  return AVERROR_INVALIDDATA;
586  }
587 
588  res = s->parse_header(s, buf, alpha_offset);
589  if (res < 0)
590  return res;
591 
592  if (res == VP56_SIZE_CHANGE) {
593  for (i = 0; i < 4; i++) {
594  av_frame_unref(s->frames[i]);
595  if (s->alpha_context)
596  av_frame_unref(s->alpha_context->frames[i]);
597  }
598  }
599 
600  ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF);
601  if (ret < 0) {
602  if (res == VP56_SIZE_CHANGE)
603  ff_set_dimensions(avctx, 0, 0);
604  return ret;
605  }
606 
607  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
608  av_frame_unref(s->alpha_context->frames[VP56_FRAME_CURRENT]);
609  if ((ret = av_frame_ref(s->alpha_context->frames[VP56_FRAME_CURRENT], p)) < 0) {
610  av_frame_unref(p);
611  if (res == VP56_SIZE_CHANGE)
612  ff_set_dimensions(avctx, 0, 0);
613  return ret;
614  }
615  }
616 
617  if (res == VP56_SIZE_CHANGE) {
618  if (vp56_size_changed(s)) {
619  av_frame_unref(p);
620  return AVERROR_INVALIDDATA;
621  }
622  }
623 
624  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
625  int bak_w = avctx->width;
626  int bak_h = avctx->height;
627  int bak_cw = avctx->coded_width;
628  int bak_ch = avctx->coded_height;
629  buf += alpha_offset;
630  remaining_buf_size -= alpha_offset;
631 
632  res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
633  if (res != 0) {
634  if(res==VP56_SIZE_CHANGE) {
635  av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
636  avctx->width = bak_w;
637  avctx->height = bak_h;
638  avctx->coded_width = bak_cw;
639  avctx->coded_height = bak_ch;
640  }
641  av_frame_unref(p);
642  return AVERROR_INVALIDDATA;
643  }
644  }
645 
646  s->discard_frame = 0;
647  avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) + 1);
648 
649  if (s->discard_frame)
650  return AVERROR_INVALIDDATA;
651 
652  if ((res = av_frame_ref(data, p)) < 0)
653  return res;
654  *got_frame = 1;
655 
656  return avpkt->size;
657 }
658 
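/* Per-plane macroblock loop (job 1 handles the alpha context). The models
 * and the DC/left/above prediction state are reset, then macroblocks are
 * decoded in raster order; after the first error the remaining macroblocks
 * are concealed when error concealment is enabled and an undamaged frame is
 * available, otherwise the frame is discarded. Finally the golden frame is
 * updated on keyframes (or when signalled) and the current/previous frames
 * are swapped. */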
659 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
660  int jobnr, int threadnr)
661 {
662  VP56Context *s0 = avctx->priv_data;
663  int is_alpha = (jobnr == 1);
664  VP56Context *s = is_alpha ? s0->alpha_context : s0;
665  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
666  int mb_row, mb_col, mb_row_flip, mb_offset = 0;
667  int block, y, uv;
668  ptrdiff_t stride_y, stride_uv;
669  int res;
670  int damaged = 0;
671 
672  if (p->key_frame) {
673  p->pict_type = AV_PICTURE_TYPE_I;
674  s->default_models_init(s);
675  for (block=0; block<s->mb_height*s->mb_width; block++)
676  s->macroblocks[block].type = VP56_MB_INTRA;
677  } else {
678  p->pict_type = AV_PICTURE_TYPE_P;
679  vp56_parse_mb_type_models(s);
680  s->parse_vector_models(s);
681  s->mb_type = VP56_MB_INTER_NOVEC_PF;
682  }
683 
684  if (s->parse_coeff_models(s))
685  goto next;
686 
687  memset(s->prev_dc, 0, sizeof(s->prev_dc));
688  s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
689  s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
690 
691  for (block=0; block < 4*s->mb_width+6; block++) {
692  s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
693  s->above_blocks[block].dc_coeff = 0;
694  s->above_blocks[block].not_null_dc = 0;
695  }
696  s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
697  s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
698 
699  stride_y = p->linesize[0];
700  stride_uv = p->linesize[1];
701 
702  if (s->flip < 0)
703  mb_offset = 7;
704 
705  /* main macroblocks loop */
706  for (mb_row=0; mb_row<s->mb_height; mb_row++) {
707  if (s->flip < 0)
708  mb_row_flip = s->mb_height - mb_row - 1;
709  else
710  mb_row_flip = mb_row;
711 
712  for (block=0; block<4; block++) {
713  s->left_block[block].ref_frame = VP56_FRAME_NONE;
714  s->left_block[block].dc_coeff = 0;
715  s->left_block[block].not_null_dc = 0;
716  }
717  memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
718  memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
719 
720  s->above_block_idx[0] = 1;
721  s->above_block_idx[1] = 2;
722  s->above_block_idx[2] = 1;
723  s->above_block_idx[3] = 2;
724  s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
725  s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
726 
727  s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
728  s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
729  s->block_offset[1] = s->block_offset[0] + 8;
730  s->block_offset[3] = s->block_offset[2] + 8;
731  s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
732  s->block_offset[5] = s->block_offset[4];
733 
734  for (mb_col=0; mb_col<s->mb_width; mb_col++) {
735  if (!damaged) {
736  int ret = vp56_decode_mb(s, mb_row, mb_col, is_alpha);
737  if (ret < 0) {
738  damaged = 1;
739  if (!s->have_undamaged_frame || !avctx->error_concealment) {
740  s->discard_frame = 1;
741  return AVERROR_INVALIDDATA;
742  }
743  }
744  }
745  if (damaged)
746  vp56_conceal_mb(s, mb_row, mb_col, is_alpha);
747 
748  for (y=0; y<4; y++) {
749  s->above_block_idx[y] += 2;
750  s->block_offset[y] += 16;
751  }
752 
753  for (uv=4; uv<6; uv++) {
754  s->above_block_idx[uv] += 1;
755  s->block_offset[uv] += 8;
756  }
757  }
758  }
759 
760  if (!damaged)
761  s->have_undamaged_frame = 1;
762 
763 next:
764  if (p->key_frame || s->golden_frame) {
765  av_frame_unref(s->frames[VP56_FRAME_GOLDEN]);
766  if ((res = av_frame_ref(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
767  return res;
768  }
769 
770  av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
771  FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
772  s->frames[VP56_FRAME_PREVIOUS]);
773  return 0;
774 }
775 
776 av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
777 {
778  VP56Context *s = avctx->priv_data;
779  return ff_vp56_init_context(avctx, s, flip, has_alpha);
780 }
781 
782 av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
783  int flip, int has_alpha)
784 {
785  int i;
786 
787  s->avctx = avctx;
788  avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
789  if (avctx->skip_alpha) avctx->pix_fmt = AV_PIX_FMT_YUV420P;
790 
791  ff_h264chroma_init(&s->h264chroma, 8);
792  ff_hpeldsp_init(&s->hdsp, avctx->flags);
793  ff_videodsp_init(&s->vdsp, 8);
794  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
795  for (i = 0; i < 64; i++) {
796 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
797  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
798 #undef TRANSPOSE
799  }
800 
801  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
802  s->frames[i] = av_frame_alloc();
803  if (!s->frames[i]) {
804  ff_vp56_free(avctx);
805  return AVERROR(ENOMEM);
806  }
807  }
808  s->edge_emu_buffer_alloc = NULL;
809 
810  s->above_blocks = NULL;
811  s->macroblocks = NULL;
812  s->quantizer = -1;
813  s->deblock_filtering = 1;
814  s->golden_frame = 0;
815 
816  s->filter = NULL;
817 
818  s->has_alpha = has_alpha;
819 
820  s->modelp = &s->model;
821 
822  if (flip) {
823  s->flip = -1;
824  s->frbi = 2;
825  s->srbi = 0;
826  } else {
827  s->flip = 1;
828  s->frbi = 0;
829  s->srbi = 2;
830  }
831 
832  return 0;
833 }
834 
835 av_cold int ff_vp56_free(AVCodecContext *avctx)
836 {
837  VP56Context *s = avctx->priv_data;
838  return ff_vp56_free_context(s);
839 }
840 
841 av_cold int ff_vp56_free_context(VP56Context *s)
842 {
843  int i;
844 
845  av_freep(&s->above_blocks);
846  av_freep(&s->macroblocks);
847  av_freep(&s->edge_emu_buffer_alloc);
848 
849  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
850  av_frame_free(&s->frames[i]);
851 
852  return 0;
853 }