FFmpeg
vp56.c
1 /*
2  * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * VP5 and VP6 compatible video decoder (common features)
24  */
25 
26 #include "avcodec.h"
27 #include "bytestream.h"
28 #include "decode.h"
29 #include "h264chroma.h"
30 #include "vp56.h"
31 #include "vp56data.h"
32 #include "vpx_rac.h"
33 
34 
35 void ff_vp56_init_dequant(VP56Context *s, int quantizer)
36 {
37  if (s->quantizer != quantizer)
38  ff_vp3dsp_set_bounding_values(s->bounding_values_array, ff_vp56_filter_threshold[quantizer]);
39  s->quantizer = quantizer;
40  s->dequant_dc = ff_vp56_dc_dequant[quantizer] << 2;
41  s->dequant_ac = ff_vp56_ac_dequant[quantizer] << 2;
42 }
43 
44 static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
45  VP56Frame ref_frame)
46 {
47  int nb_pred = 0;
48  VP56mv vect[2] = {{0,0}, {0,0}};
49  int pos, offset;
50  VP56mv mvp;
51 
52  for (pos=0; pos<12; pos++) {
53  mvp.x = col + ff_vp56_candidate_predictor_pos[pos][0];
54  mvp.y = row + ff_vp56_candidate_predictor_pos[pos][1];
55  if (mvp.x < 0 || mvp.x >= s->mb_width ||
56  mvp.y < 0 || mvp.y >= s->mb_height)
57  continue;
58  offset = mvp.x + s->mb_width*mvp.y;
59 
60  if (ff_vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
61  continue;
62  if ((s->macroblocks[offset].mv.x == vect[0].x &&
63  s->macroblocks[offset].mv.y == vect[0].y) ||
64  (s->macroblocks[offset].mv.x == 0 &&
65  s->macroblocks[offset].mv.y == 0))
66  continue;
67 
68  vect[nb_pred++] = s->macroblocks[offset].mv;
69  if (nb_pred > 1) {
70  nb_pred = -1;
71  break;
72  }
73  s->vector_candidate_pos = pos;
74  }
75 
76  s->vector_candidate[0] = vect[0];
77  s->vector_candidate[1] = vect[1];
78 
79  return nb_pred+1;
80 }
81 
82 static void vp56_parse_mb_type_models(VP56Context *s)
83 {
84  VPXRangeCoder *c = &s->c;
85  VP56Model *model = s->modelp;
86  int i, ctx, type;
87 
88  for (ctx=0; ctx<3; ctx++) {
89  if (vpx_rac_get_prob_branchy(c, 174)) {
90  int idx = vp56_rac_gets(c, 4);
91  memcpy(model->mb_types_stats[ctx],
92  ff_vp56_pre_def_mb_type_stats[idx][ctx],
93  sizeof(model->mb_types_stats[ctx]));
94  }
95  if (vpx_rac_get_prob_branchy(c, 254)) {
96  for (type=0; type<10; type++) {
97  for(i=0; i<2; i++) {
98  if (vpx_rac_get_prob_branchy(c, 205)) {
99  int delta, sign = vpx_rac_get(c);
100 
101  delta = vp56_rac_get_tree(c, ff_vp56_pmbtm_tree,
102  ff_vp56_mb_type_model_model);
103  if (!delta)
104  delta = 4 * vp56_rac_gets(c, 7);
105  model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
106  }
107  }
108  }
109  }
110  }
111 
112  /* compute MB type probability tables based on previous MB type */
113  for (ctx=0; ctx<3; ctx++) {
114  int p[10];
115 
116  for (type=0; type<10; type++)
117  p[type] = 100 * model->mb_types_stats[ctx][type][1];
118 
119  for (type=0; type<10; type++) {
120  int p02, p34, p0234, p17, p56, p89, p5689, p156789;
121 
122  /* conservative MB type probability */
123  model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);
124 
125  p[type] = 0; /* same MB type => weight is null */
126 
127  /* binary tree parsing probabilities */
128  p02 = p[0] + p[2];
129  p34 = p[3] + p[4];
130  p0234 = p02 + p34;
131  p17 = p[1] + p[7];
132  p56 = p[5] + p[6];
133  p89 = p[8] + p[9];
134  p5689 = p56 + p89;
135  p156789 = p17 + p5689;
136 
137  model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
138  model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
139  model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
140  model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
141  model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
142  model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
143  model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
144  model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
145  model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
146 
147  /* restore initial value */
148  p[type] = 100 * model->mb_types_stats[ctx][type][1];
149  }
150  }
151 }
152 
153 static VP56mb vp56_parse_mb_type(VP56Context *s,
154  VP56mb prev_type, int ctx)
155 {
156  uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
157  VPXRangeCoder *c = &s->c;
158 
159  if (vpx_rac_get_prob_branchy(c, mb_type_model[0]))
160  return prev_type;
161  else
162  return vp56_rac_get_tree(c, ff_vp56_pmbt_tree, mb_type_model);
163 }
164 
165 static void vp56_decode_4mv(VP56Context *s, int row, int col)
166 {
167  VP56mv mv = {0,0};
168  int type[4];
169  int b;
170 
171  /* parse each block type */
172  for (b=0; b<4; b++) {
173  type[b] = vp56_rac_gets(&s->c, 2);
174  if (type[b])
175  type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
176  }
177 
178  /* get vectors */
179  for (b=0; b<4; b++) {
180  switch (type[b]) {
181  case VP56_MB_INTER_NOVEC_PF:
182  s->mv[b] = (VP56mv) {0,0};
183  break;
184  case VP56_MB_INTER_DELTA_PF:
185  s->parse_vector_adjustment(s, &s->mv[b]);
186  break;
187  case VP56_MB_INTER_V1_PF:
188  s->mv[b] = s->vector_candidate[0];
189  break;
190  case VP56_MB_INTER_V2_PF:
191  s->mv[b] = s->vector_candidate[1];
192  break;
193  }
194  mv.x += s->mv[b].x;
195  mv.y += s->mv[b].y;
196  }
197 
198  /* this is the one selected for the whole MB for prediction */
199  s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
200 
201  /* chroma vectors are average luma vectors */
202  s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
203  s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
204 }
205 
206 static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
207 {
208  VP56mv *mv, vect = {0,0};
209  int ctx, b;
210 
211  ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
212  s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
213  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
214 
215  switch (s->mb_type) {
216  case VP56_MB_INTER_V1_PF:
217  mv = &s->vector_candidate[0];
218  break;
219 
220  case VP56_MB_INTER_V2_PF:
221  mv = &s->vector_candidate[1];
222  break;
223 
224  case VP56_MB_INTER_V1_GF:
225  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
226  mv = &s->vector_candidate[0];
227  break;
228 
229  case VP56_MB_INTER_V2_GF:
230  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
231  mv = &s->vector_candidate[1];
232  break;
233 
234  case VP56_MB_INTER_DELTA_PF:
235  s->parse_vector_adjustment(s, &vect);
236  mv = &vect;
237  break;
238 
239  case VP56_MB_INTER_DELTA_GF:
240  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
241  s->parse_vector_adjustment(s, &vect);
242  mv = &vect;
243  break;
244 
245  case VP56_MB_INTER_4V:
246  vp56_decode_4mv(s, row, col);
247  return s->mb_type;
248 
249  default:
250  mv = &vect;
251  break;
252  }
253 
254  s->macroblocks[row*s->mb_width + col].mv = *mv;
255 
256  /* same vector for all blocks */
257  for (b=0; b<6; b++)
258  s->mv[b] = *mv;
259 
260  return s->mb_type;
261 }
262 
263 static VP56mb vp56_conceal_mv(VP56Context *s, int row, int col)
264 {
265  VP56mv *mv, vect = {0,0};
266  int b;
267 
268  s->mb_type = VP56_MB_INTER_NOVEC_PF;
269  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
270 
271  mv = &vect;
272 
273  s->macroblocks[row*s->mb_width + col].mv = *mv;
274 
275  /* same vector for all blocks */
276  for (b=0; b<6; b++)
277  s->mv[b] = *mv;
278 
279  return s->mb_type;
280 }
281 
282 static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
283 {
284  int idx = s->idct_scantable[0];
285  int b;
286 
287  for (b=0; b<6; b++) {
288  VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
289  VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
290  int count = 0;
291  int dc = 0;
292  int i;
293 
294  if (ref_frame == lb->ref_frame) {
295  dc += lb->dc_coeff;
296  count++;
297  }
298  if (ref_frame == ab->ref_frame) {
299  dc += ab->dc_coeff;
300  count++;
301  }
302  if (s->avctx->codec->id == AV_CODEC_ID_VP5)
303  for (i=0; i<2; i++)
304  if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
305  dc += ab[-1+2*i].dc_coeff;
306  count++;
307  }
308  if (count == 0)
309  dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
310  else if (count == 2)
311  dc /= 2;
312 
313  s->block_coeff[b][idx] += dc;
314  s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
315  ab->dc_coeff = s->block_coeff[b][idx];
316  ab->ref_frame = ref_frame;
317  lb->dc_coeff = s->block_coeff[b][idx];
318  lb->ref_frame = ref_frame;
319  s->block_coeff[b][idx] *= s->dequant_dc;
320  }
321 }
322 
323 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
324  ptrdiff_t stride, int dx, int dy)
325 {
326  if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
327  int t = ff_vp56_filter_threshold[s->quantizer];
328  if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
329  if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
330  } else {
331  int * bounding_values = s->bounding_values_array + 127;
332  if (dx)
333  ff_vp3dsp_h_loop_filter_12(yuv + 10-dx, stride, bounding_values);
334  if (dy)
335  ff_vp3dsp_v_loop_filter_12(yuv + stride*(10-dy), stride, bounding_values);
336  }
337 }
338 
339 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
340  ptrdiff_t stride, int x, int y)
341 {
342  uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
343  uint8_t *src_block;
344  int src_offset;
345  int overlap_offset = 0;
346  int mask = s->vp56_coord_div[b] - 1;
347  int deblock_filtering = s->deblock_filtering;
348  int dx;
349  int dy;
350 
351  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
352  (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
353  && !(s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)))
354  deblock_filtering = 0;
355 
356  dx = s->mv[b].x / s->vp56_coord_div[b];
357  dy = s->mv[b].y / s->vp56_coord_div[b];
358 
359  if (b >= 4) {
360  x /= 2;
361  y /= 2;
362  }
363  x += dx - 2;
364  y += dy - 2;
365 
366  if (x<0 || x+12>=s->plane_width[plane] ||
367  y<0 || y+12>=s->plane_height[plane]) {
368  s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
369  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
370  stride, stride,
371  12, 12, x, y,
372  s->plane_width[plane],
373  s->plane_height[plane]);
374  src_block = s->edge_emu_buffer;
375  src_offset = 2 + 2*stride;
376  } else if (deblock_filtering) {
377  /* only need a 12x12 block, but there is no such dsp function, */
378  /* so copy a 16x12 block */
379  s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
380  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
381  stride, 12);
382  src_block = s->edge_emu_buffer;
383  src_offset = 2 + 2*stride;
384  } else {
385  src_block = src;
386  src_offset = s->block_offset[b] + dy*stride + dx;
387  }
388 
389  if (deblock_filtering)
390  vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
391 
392  if (s->mv[b].x & mask)
393  overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
394  if (s->mv[b].y & mask)
395  overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
396 
397  if (overlap_offset) {
398  if (s->filter)
399  s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
400  stride, s->mv[b], mask, s->filter_selection, b<4);
401  else
402  s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
403  src_block+src_offset+overlap_offset,
404  stride, 8);
405  } else {
406  s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
407  }
408 }
409 
410 static void vp56_idct_put(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
411 {
412  if (selector > 10 || selector == 1)
413  s->vp3dsp.idct_put(dest, stride, block);
414  else
415  ff_vp3dsp_idct10_put(dest, stride, block);
416 }
417 
418 static void vp56_idct_add(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
419 {
420  if (selector > 10)
421  s->vp3dsp.idct_add(dest, stride, block);
422  else if (selector > 1)
423  ff_vp3dsp_idct10_add(dest, stride, block);
424  else
425  s->vp3dsp.idct_dc_add(dest, stride, block);
426 }
427 
428 static av_always_inline void vp56_render_mb(VP56Context *s, int row, int col, int is_alpha, VP56mb mb_type)
429 {
430  int b, ab, b_max, plane, off;
431  AVFrame *frame_current, *frame_ref;
432  VP56Frame ref_frame;
433 
434  ref_frame = ff_vp56_reference_frame[mb_type];
435 
436  frame_current = s->frames[VP56_FRAME_CURRENT];
437  frame_ref = s->frames[ref_frame];
438  if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
439  return;
440 
441  ab = 6*is_alpha;
442  b_max = 6 - 2*is_alpha;
443 
444  switch (mb_type) {
445  case VP56_MB_INTRA:
446  for (b=0; b<b_max; b++) {
447  plane = ff_vp56_b2p[b+ab];
448  vp56_idct_put(s, frame_current->data[plane] + s->block_offset[b],
449  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
450  }
451  break;
452 
453  case VP56_MB_INTER_NOVEC_PF:
454  case VP56_MB_INTER_NOVEC_GF:
455  for (b=0; b<b_max; b++) {
456  plane = ff_vp56_b2p[b+ab];
457  off = s->block_offset[b];
458  s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
459  frame_ref->data[plane] + off,
460  s->stride[plane], 8);
461  vp56_idct_add(s, frame_current->data[plane] + off,
462  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
463  }
464  break;
465 
466  case VP56_MB_INTER_DELTA_PF:
467  case VP56_MB_INTER_V1_PF:
468  case VP56_MB_INTER_V2_PF:
469  case VP56_MB_INTER_DELTA_GF:
470  case VP56_MB_INTER_4V:
471  case VP56_MB_INTER_V1_GF:
472  case VP56_MB_INTER_V2_GF:
473  for (b=0; b<b_max; b++) {
474  int x_off = b==1 || b==3 ? 8 : 0;
475  int y_off = b==2 || b==3 ? 8 : 0;
476  plane = ff_vp56_b2p[b+ab];
477  vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
478  16*col+x_off, 16*row+y_off);
479  vp56_idct_add(s, frame_current->data[plane] + s->block_offset[b],
480  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
481  }
482  break;
483  }
484 
485  if (is_alpha) {
486  s->block_coeff[4][0] = 0;
487  s->block_coeff[5][0] = 0;
488  }
489 }
490 
491 static int vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
492 {
493  VP56mb mb_type;
494  int ret;
495 
496  if (s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)
497  mb_type = VP56_MB_INTRA;
498  else
499  mb_type = vp56_decode_mv(s, row, col);
500 
501  ret = s->parse_coeff(s);
502  if (ret < 0)
503  return ret;
504 
505  vp56_render_mb(s, row, col, is_alpha, mb_type);
506 
507  return 0;
508 }
509 
510 static int vp56_conceal_mb(VP56Context *s, int row, int col, int is_alpha)
511 {
512  VP56mb mb_type;
513 
514  if (s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)
515  mb_type = VP56_MB_INTRA;
516  else
517  mb_type = vp56_conceal_mv(s, row, col);
518 
519  vp56_render_mb(s, row, col, is_alpha, mb_type);
520 
521  return 0;
522 }
523 
524 static int vp56_size_changed(VP56Context *s)
525 {
526  AVCodecContext *avctx = s->avctx;
527  int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0];
528  int i;
529 
530  s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
531  s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
532  s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
533  s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
534 
535  s->have_undamaged_frame = 0;
536 
537  for (i=0; i<4; i++)
538  s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
539 
540  s->mb_width = (avctx->coded_width +15) / 16;
541  s->mb_height = (avctx->coded_height+15) / 16;
542 
543  if (s->mb_width > 1000 || s->mb_height > 1000) {
544  ff_set_dimensions(avctx, 0, 0);
545  av_log(avctx, AV_LOG_ERROR, "picture too big\n");
546  return AVERROR_INVALIDDATA;
547  }
548 
549  av_reallocp_array(&s->above_blocks, 4*s->mb_width+6,
550  sizeof(*s->above_blocks));
551  av_reallocp_array(&s->macroblocks, s->mb_width*s->mb_height,
552  sizeof(*s->macroblocks));
553  av_free(s->edge_emu_buffer_alloc);
554  s->edge_emu_buffer_alloc = av_malloc(16*stride);
555  s->edge_emu_buffer = s->edge_emu_buffer_alloc;
556  if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc)
557  return AVERROR(ENOMEM);
558  if (s->flip < 0)
559  s->edge_emu_buffer += 15 * stride;
560 
561  if (s->alpha_context)
562  return vp56_size_changed(s->alpha_context);
563 
564  return 0;
565 }
566 
567 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);
568 
569 int ff_vp56_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
570  int *got_frame, AVPacket *avpkt)
571 {
572  const uint8_t *buf = avpkt->data;
573  VP56Context *s = avctx->priv_data;
574  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
575  int remaining_buf_size = avpkt->size;
576  int alpha_offset = remaining_buf_size;
577  int i, res;
578  int ret;
579 
580  if (s->has_alpha) {
581  if (remaining_buf_size < 3)
582  return AVERROR_INVALIDDATA;
583  alpha_offset = bytestream_get_be24(&buf);
584  remaining_buf_size -= 3;
585  if (remaining_buf_size < alpha_offset)
586  return AVERROR_INVALIDDATA;
587  }
588 
589  res = s->parse_header(s, buf, alpha_offset);
590  if (res < 0)
591  return res;
592 
593  if (res == VP56_SIZE_CHANGE) {
594  for (i = 0; i < 4; i++) {
595  av_frame_unref(s->frames[i]);
596  if (s->alpha_context)
597  av_frame_unref(s->alpha_context->frames[i]);
598  }
599  s->frames[VP56_FRAME_CURRENT]->flags |= AV_FRAME_FLAG_KEY; //FIXME
600  }
601 
602  ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF);
603  if (ret < 0) {
604  if (res == VP56_SIZE_CHANGE)
605  ff_set_dimensions(avctx, 0, 0);
606  return ret;
607  }
608 
609  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
610  if ((ret = av_frame_replace(s->alpha_context->frames[VP56_FRAME_CURRENT], p)) < 0) {
611  av_frame_unref(p);
612  if (res == VP56_SIZE_CHANGE)
613  ff_set_dimensions(avctx, 0, 0);
614  return ret;
615  }
616  }
617 
618  if (res == VP56_SIZE_CHANGE) {
619  if (vp56_size_changed(s)) {
620  av_frame_unref(p);
621  return AVERROR_INVALIDDATA;
622  }
623  }
624 
625  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
626  int bak_w = avctx->width;
627  int bak_h = avctx->height;
628  int bak_cw = avctx->coded_width;
629  int bak_ch = avctx->coded_height;
630  buf += alpha_offset;
631  remaining_buf_size -= alpha_offset;
632 
633  res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
634  if (res != 0) {
635  if(res==VP56_SIZE_CHANGE) {
636  av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
637  avctx->width = bak_w;
638  avctx->height = bak_h;
639  avctx->coded_width = bak_cw;
640  avctx->coded_height = bak_ch;
641  }
642  av_frame_unref(p);
643  return AVERROR_INVALIDDATA;
644  }
645  }
646 
647  s->discard_frame = 0;
648  avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) + 1);
649 
650  if (s->discard_frame)
651  return AVERROR_INVALIDDATA;
652 
653  if ((res = av_frame_ref(rframe, p)) < 0)
654  return res;
655  *got_frame = 1;
656 
657  return avpkt->size;
658 }
659 
660 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
661  int jobnr, int threadnr)
662 {
663  VP56Context *s0 = avctx->priv_data;
664  int is_alpha = (jobnr == 1);
665  VP56Context *s = is_alpha ? s0->alpha_context : s0;
666  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
667  int mb_row, mb_col, mb_row_flip, mb_offset = 0;
668  int block, y, uv;
669  ptrdiff_t stride_y, stride_uv;
670  int res;
671  int damaged = 0;
672 
673  if (p->flags & AV_FRAME_FLAG_KEY) {
674  p->pict_type = AV_PICTURE_TYPE_I;
675  s->default_models_init(s);
676  for (block=0; block<s->mb_height*s->mb_width; block++)
677  s->macroblocks[block].type = VP56_MB_INTRA;
678  } else {
679  p->pict_type = AV_PICTURE_TYPE_P;
680  vp56_parse_mb_type_models(s);
681  s->parse_vector_models(s);
682  s->mb_type = VP56_MB_INTER_NOVEC_PF;
683  }
684 
685  if (s->parse_coeff_models(s))
686  goto next;
687 
688  memset(s->prev_dc, 0, sizeof(s->prev_dc));
689  s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
690  s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
691 
692  for (block=0; block < 4*s->mb_width+6; block++) {
693  s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
694  s->above_blocks[block].dc_coeff = 0;
695  s->above_blocks[block].not_null_dc = 0;
696  }
697  s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
698  s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
699 
700  stride_y = p->linesize[0];
701  stride_uv = p->linesize[1];
702 
703  if (s->flip < 0)
704  mb_offset = 7;
705 
706  /* main macroblocks loop */
707  for (mb_row=0; mb_row<s->mb_height; mb_row++) {
708  if (s->flip < 0)
709  mb_row_flip = s->mb_height - mb_row - 1;
710  else
711  mb_row_flip = mb_row;
712 
713  for (block=0; block<4; block++) {
714  s->left_block[block].ref_frame = VP56_FRAME_NONE;
715  s->left_block[block].dc_coeff = 0;
716  s->left_block[block].not_null_dc = 0;
717  }
718  memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
719  memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
720 
721  s->above_block_idx[0] = 1;
722  s->above_block_idx[1] = 2;
723  s->above_block_idx[2] = 1;
724  s->above_block_idx[3] = 2;
725  s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
726  s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
727 
728  s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
729  s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
730  s->block_offset[1] = s->block_offset[0] + 8;
731  s->block_offset[3] = s->block_offset[2] + 8;
732  s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
733  s->block_offset[5] = s->block_offset[4];
734 
735  for (mb_col=0; mb_col<s->mb_width; mb_col++) {
736  if (!damaged) {
737  int ret = vp56_decode_mb(s, mb_row, mb_col, is_alpha);
738  if (ret < 0) {
739  damaged = 1;
740  if (!s->have_undamaged_frame || !avctx->error_concealment) {
741  s->discard_frame = 1;
742  return AVERROR_INVALIDDATA;
743  }
744  }
745  }
746  if (damaged)
747  vp56_conceal_mb(s, mb_row, mb_col, is_alpha);
748 
749  for (y=0; y<4; y++) {
750  s->above_block_idx[y] += 2;
751  s->block_offset[y] += 16;
752  }
753 
754  for (uv=4; uv<6; uv++) {
755  s->above_block_idx[uv] += 1;
756  s->block_offset[uv] += 8;
757  }
758  }
759  }
760 
761  if (!damaged)
762  s->have_undamaged_frame = 1;
763 
764 next:
765  if ((p->flags & AV_FRAME_FLAG_KEY) || s->golden_frame) {
766  if ((res = av_frame_replace(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
767  return res;
768  }
769 
770  av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
771  FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
772  s->frames[VP56_FRAME_PREVIOUS]);
773  return 0;
774 }
775 
776 av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
777  int flip, int has_alpha)
778 {
779  int i;
780 
781  s->avctx = avctx;
782  avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
783  if (avctx->skip_alpha) avctx->pix_fmt = AV_PIX_FMT_YUV420P;
784 
785  ff_h264chroma_init(&s->h264chroma, 8);
786  ff_hpeldsp_init(&s->hdsp, avctx->flags);
787  ff_videodsp_init(&s->vdsp, 8);
788  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
789  for (i = 0; i < 64; i++) {
790 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
791  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
792 #undef TRANSPOSE
793  }
794 
795  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
796  s->frames[i] = av_frame_alloc();
797  if (!s->frames[i])
798  return AVERROR(ENOMEM);
799  }
800  s->edge_emu_buffer_alloc = NULL;
801 
802  s->above_blocks = NULL;
803  s->macroblocks = NULL;
804  s->quantizer = -1;
805  s->deblock_filtering = 1;
806  s->golden_frame = 0;
807 
808  s->filter = NULL;
809 
810  s->has_alpha = has_alpha;
811 
812  s->modelp = &s->model;
813 
814  if (flip) {
815  s->flip = -1;
816  s->frbi = 2;
817  s->srbi = 0;
818  } else {
819  s->flip = 1;
820  s->frbi = 0;
821  s->srbi = 2;
822  }
823 
824  return 0;
825 }
826 
827 av_cold int ff_vp56_free_context(VP56Context *s)
828 {
829  int i;
830 
831  av_freep(&s->above_blocks);
832  av_freep(&s->macroblocks);
833  av_freep(&s->edge_emu_buffer_alloc);
834 
835  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
836  av_frame_free(&s->frames[i]);
837 
838  return 0;
839 }
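
The helpers above are not a standalone decoder: vp5.c and vp6.c wrap them, install the per-codec callbacks (s->parse_header, s->parse_coeff, s->default_models_init, and so on) and then let ff_vp56_decode_frame() drive everything. The fragment below is a minimal, hypothetical sketch of such a wrapper for a codec with no vertical flip and no alpha plane; only the ff_vp56_* prototypes are taken from this file, the my_vp56_* names are purely illustrative.

/* Hypothetical wrapper sketch (not part of vp56.c): how a VP5/VP6-style
 * decoder could drive the shared vp56 helpers. */
#include "libavutil/attributes.h"
#include "avcodec.h"
#include "vp56.h"

static av_cold int my_vp56_decode_init(AVCodecContext *avctx)
{
    VP56Context *s = avctx->priv_data;
    int ret;

    /* flip = 0: no vertical flip; has_alpha = 0: plain YUV 4:2:0 output */
    ret = ff_vp56_init_context(avctx, s, 0, 0);
    if (ret < 0)
        return ret;

    /* a real codec would install its callbacks here, e.g. s->parse_header,
     * s->parse_coeff, s->default_models_init, s->parse_vector_models, ... */
    return 0;
}

static av_cold int my_vp56_decode_end(AVCodecContext *avctx)
{
    return ff_vp56_free_context(avctx->priv_data);
}

Decoding itself then goes through ff_vp56_decode_frame(avctx, frame, &got_frame, avpkt), which parses the header, obtains a buffer with ff_get_buffer(), runs ff_vp56_decode_mbs() over the picture (and over the alpha plane when the output is YUVA420P), and returns the number of bytes consumed from the packet.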