FFmpeg
snowdec.c
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/intmath.h"
22 #include "libavutil/log.h"
23 #include "libavutil/opt.h"
24 #include "avcodec.h"
25 #include "codec_internal.h"
26 #include "snow_dwt.h"
27 #include "snow.h"
28 
29 #include "rangecoder.h"
30 #include "mathops.h"
31 
32 static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
33  Plane *p= &s->plane[plane_index];
34  const int mb_w= s->b_width << s->block_max_depth;
35  const int mb_h= s->b_height << s->block_max_depth;
36  int x, y, mb_x;
37  int block_size = MB_SIZE >> s->block_max_depth;
38  int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
39  int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
40  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
41  int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
42  int ref_stride= s->current_picture->linesize[plane_index];
43  uint8_t *dst8= s->current_picture->data[plane_index];
44  int w= p->width;
45  int h= p->height;
46 
47  if(s->keyframe || (s->avctx->debug&512)){
48  if(mb_y==mb_h)
49  return;
50 
51  if(add){
52  for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
53 // DWTELEM * line = slice_buffer_get_line(sb, y);
54  IDWTELEM * line = sb->line[y];
55  for(x=0; x<w; x++){
56 // int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
57  int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
58  v >>= FRAC_BITS;
59  if(v&(~255)) v= ~(v>>31);
60  dst8[x + y*ref_stride]= v;
61  }
62  }
63  }else{
64  for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
65 // DWTELEM * line = slice_buffer_get_line(sb, y);
66  IDWTELEM * line = sb->line[y];
67  for(x=0; x<w; x++){
68  line[x] -= 128 << FRAC_BITS;
69 // buf[x + y*w]-= 128<<FRAC_BITS;
70  }
71  }
72  }
73 
74  return;
75  }
76 
77  for(mb_x=0; mb_x<=mb_w; mb_x++){
78  add_yblock(s, 1, sb, old_buffer, dst8, obmc,
79  block_w*mb_x - block_w/2,
80  block_h*mb_y - block_h/2,
81  block_w, block_h,
82  w, h,
83  w, ref_stride, obmc_stride,
84  mb_x - 1, mb_y - 1,
85  add, 0, plane_index);
86  }
87 
88  if(s->avmv && mb_y < mb_h && plane_index == 0)
89  for(mb_x=0; mb_x<mb_w; mb_x++){
90  AVMotionVector *avmv = s->avmv + s->avmv_index;
91  const int b_width = s->b_width << s->block_max_depth;
92  const int b_stride= b_width;
93  BlockNode *bn= &s->block[mb_x + mb_y*b_stride];
94 
95  if (bn->type)
96  continue;
97 
98  s->avmv_index++;
99 
100  avmv->w = block_w;
101  avmv->h = block_h;
102  avmv->dst_x = block_w*mb_x - block_w/2;
103  avmv->dst_y = block_h*mb_y - block_h/2;
104  avmv->motion_scale = 8;
105  avmv->motion_x = bn->mx * s->mv_scale;
106  avmv->motion_y = bn->my * s->mv_scale;
107  avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
108  avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
109  avmv->source= -1 - bn->ref;
110  avmv->flags = 0;
111  }
112 }
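
In the keyframe/debug path above, the decoder folds the IDWT output back to 8-bit samples with a rounding offset and a branchless clamp (v = ~(v>>31) when any bit outside 0..255 is set). A minimal standalone sketch of that conversion, not part of snowdec.c; FRAC_BITS is assumed to be 4 here for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 4   /* assumed value, for illustration only */

    static uint8_t idwt_to_pixel(int coeff)
    {
        /* re-centre around 128, add 0.5 in fixed point, drop the fractional bits */
        int v = coeff + (128 << FRAC_BITS) + (1 << (FRAC_BITS - 1));
        v >>= FRAC_BITS;
        /* branchless clamp:
         * negative  -> v>>31 == -1 -> ~(-1) == 0
         * too large -> v>>31 ==  0 -> ~0   == -1 -> 255 once stored as uint8_t */
        if (v & ~255)
            v = ~(v >> 31);
        return (uint8_t)v;
    }

    int main(void)
    {
        printf("%u %u %u\n", idwt_to_pixel(-3000), idwt_to_pixel(0), idwt_to_pixel(3000));
        return 0;   /* prints: 0 128 255 */
    }
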
113 
114 static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
115  const int w= b->width;
116  int y;
117  const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
118  int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
119  int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
120  int new_index = 0;
121 
122  if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
123  qadd= 0;
124  qmul= 1<<QEXPSHIFT;
125  }
126 
127  /* If we are on the second or later slice, restore our index. */
128  if (start_y != 0)
129  new_index = save_state[0];
130 
131 
132  for(y=start_y; y<h; y++){
133  int x = 0;
134  int v;
135  IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
136  memset(line, 0, b->width*sizeof(IDWTELEM));
137  v = b->x_coeff[new_index].coeff;
138  x = b->x_coeff[new_index++].x;
139  while(x < w){
140  register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
141  register int u= -(v&1);
142  line[x] = (t^u) - u;
143 
144  v = b->x_coeff[new_index].coeff;
145  x = b->x_coeff[new_index++].x;
146  }
147  }
148 
149  /* Save our variables for the next slice. */
150  save_state[0] = new_index;
151 
152  return;
153 }
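
decode_subband_slice_buffered() above rebuilds each coefficient from a (position, value) pair in which the low bit of the value carries the sign; (t^u) - u with u = -(v&1) is a branchless conditional negation. A small self-contained sketch of that step, with placeholder quantiser parameters rather than the real ones:

    #include <stdio.h>

    static int decode_coeff(int v, int qmul, int qadd, int qexpshift)
    {
        int t = (int)(((v >> 1) * (unsigned)qmul + qadd) >> qexpshift);
        int u = -(v & 1);          /* 0 if positive, -1 if negative */
        return (t ^ u) - u;        /* conditional negation without a branch */
    }

    int main(void)
    {
        /* with qmul = 1<<qexpshift and qadd = 0 this reduces to plain sign/magnitude */
        printf("%d %d\n", decode_coeff(6, 16, 0, 4), decode_coeff(7, 16, 0, 4));
        return 0;   /* prints: 3 -3 */
    }
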
154 
155 static int decode_q_branch(SnowContext *s, int level, int x, int y){
156  const int w= s->b_width << s->block_max_depth;
157  const int rem_depth= s->block_max_depth - level;
158  const int index= (x + y*w) << rem_depth;
159  int trx= (x+1)<<rem_depth;
160  const BlockNode *left = x ? &s->block[index-1] : &null_block;
161  const BlockNode *top = y ? &s->block[index-w] : &null_block;
162  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
163  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
164  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
165  int res;
166 
167  if(s->keyframe){
168  set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
169  return 0;
170  }
171 
172  if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
173  int type, mx, my;
174  int l = left->color[0];
175  int cb= left->color[1];
176  int cr= left->color[2];
177  unsigned ref = 0;
178  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
179  int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
180  int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
181 
182  type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
183  if(type){
184  int ld, cbd, crd;
185  pred_mv(s, &mx, &my, 0, left, top, tr);
186  ld = get_symbol(&s->c, &s->block_state[32], 1);
187  if (ld < -255 || ld > 255) {
188  return AVERROR_INVALIDDATA;
189  }
190  l += ld;
191  if (s->nb_planes > 2) {
192  cbd = get_symbol(&s->c, &s->block_state[64], 1);
193  crd = get_symbol(&s->c, &s->block_state[96], 1);
194  if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
195  return AVERROR_INVALIDDATA;
196  }
197  cb += cbd;
198  cr += crd;
199  }
200  }else{
201  if(s->ref_frames > 1)
202  ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
203  if (ref >= s->ref_frames) {
204  av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
205  return AVERROR_INVALIDDATA;
206  }
207  pred_mv(s, &mx, &my, ref, left, top, tr);
208  mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
209  my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
210  }
211  set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
212  }else{
213  if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
214  (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
215  (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
216  (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
217  return res;
218  }
219  return 0;
220 }
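
The mx_context/my_context values above bucket the left/top motion-vector difference by its bit length, so blocks whose neighbours move similarly share range-coder contexts. A hedged sketch of that idea, with my_ilog2 standing in for FFmpeg's av_log2:

    #include <stdio.h>

    static int my_ilog2(unsigned v)   /* position of the highest set bit, 0 for v==0 */
    {
        int n = 0;
        while (v > 1) { v >>= 1; n++; }
        return n;
    }

    static int mv_context(int left_mx, int top_mx)
    {
        int d = left_mx - top_mx;
        if (d < 0) d = -d;
        return my_ilog2(2 * (unsigned)d);   /* 0 for equal MVs, grows with the difference */
    }

    int main(void)
    {
        printf("%d %d %d\n", mv_context(0, 0), mv_context(5, 2), mv_context(100, -100));
        return 0;   /* prints: 0 2 8 */
    }
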
221 
222 static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
223  const int w= b->width;
224  const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
225  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
226  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
227  int x,y;
228 
229  if(s->qlog == LOSSLESS_QLOG) return;
230 
231  for(y=start_y; y<end_y; y++){
232 // DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
233  IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
234  for(x=0; x<w; x++){
235  int i= line[x];
236  if(i<0){
237  line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
238  }else if(i>0){
239  line[x]= (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
240  }
241  }
242  }
243 }
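
dequantize_slice_buffered() derives its step size from qlog the same way decode_subband_slice_buffered() does: the low bits of qlog index a mantissa table and the high bits become a power-of-two shift, giving roughly exponential quantiser spacing. A sketch of that mapping; the constants and table below are demo values, not the ones from snow.h and snowdata.h:

    #include <stdio.h>

    #define QROOT_DEMO  8    /* demo: quantiser steps per doubling */
    #define QSHIFT_DEMO 3    /* demo: log2(QROOT_DEMO)             */

    static const unsigned char qexp_demo[QROOT_DEMO] = {
        128, 140, 152, 166, 181, 197, 215, 235   /* roughly 128 * 2^(i/8) */
    };

    static int qmul_from_qlog(int qlog)
    {
        /* low bits pick the mantissa, high bits the shift */
        return qexp_demo[qlog & (QROOT_DEMO - 1)] << (qlog >> QSHIFT_DEMO);
    }

    int main(void)
    {
        for (int qlog = 0; qlog < 24; qlog += 4)
            printf("qlog=%2d -> qmul=%d\n", qlog, qmul_from_qlog(qlog));
        return 0;
    }
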
244 
245 static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
246  const int w= b->width;
247  int x,y;
248 
249  IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
250  IDWTELEM * prev;
251 
252  if (start_y != 0)
253  line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
254 
255  for(y=start_y; y<end_y; y++){
256  prev = line;
257 // line = slice_buffer_get_line_from_address(sb, src + (y * stride));
258  line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
259  for(x=0; x<w; x++){
260  if(x){
261  if(use_median){
262  if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
263  else line[x] += line[x - 1];
264  }else{
265  if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
266  else line[x] += line[x - 1];
267  }
268  }else{
269  if(y) line[x] += prev[x];
270  }
271  }
272  }
273 }
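
correlate_slice_buffered() undoes the intra prediction of the LL band; in the use_median==0 path taken from decode_frame(), each sample adds the median of its left neighbour, its top neighbour and the gradient left + top - topleft (FFmpeg's mid_pred). A small standalone sketch of that predictor:

    #include <stdio.h>

    static int median3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* now a <= b */
        if (b > c)  b = c;                        /* b = min(b, c) */
        if (a > b)  b = a;                        /* b = median    */
        return b;
    }

    static int predict(int left, int top, int topleft)
    {
        return median3(left, top, left + top - topleft);
    }

    int main(void)
    {
        /* flat area, horizontal edge, vertical edge */
        printf("%d %d %d\n", predict(10, 10, 10), predict(10, 50, 50), predict(50, 10, 50));
        return 0;   /* prints: 10 10 10 */
    }
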
274 
275 static void decode_qlogs(SnowContext *s){
276  int plane_index, level, orientation;
277 
278  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
279  for(level=0; level<s->spatial_decomposition_count; level++){
280  for(orientation=level ? 1:0; orientation<4; orientation++){
281  int q;
282  if (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
283  else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
284  else q= get_symbol(&s->c, s->header_state, 1);
285  s->plane[plane_index].band[level][orientation].qlog= q;
286  }
287  }
288  }
289 }
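
Only a subset of subbands carries a coded qlog in the loop above: the LL band exists only at level 0, orientation 2 reuses orientation 1, and the second chroma plane reuses the first. A sketch that merely enumerates which bands read a symbol; the plane and level counts are illustrative:

    #include <stdio.h>

    int main(void)
    {
        const int nb_planes = 3, levels = 3;        /* illustrative sizes */
        for (int p = 0; p < nb_planes; p++)
            for (int l = 0; l < levels; l++)
                for (int o = l ? 1 : 0; o < 4; o++) {
                    const char *src = (p == 2) ? "copy of plane 1"
                                    : (o == 2) ? "copy of orientation 1"
                                    : "read from bitstream";
                    printf("plane %d level %d orientation %d: %s\n", p, l, o, src);
                }
        return 0;
    }
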
290 
291 #define GET_S(dst, check) \
292  tmp= get_symbol(&s->c, s->header_state, 0);\
293  if(!(check)){\
294  av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
295  return AVERROR_INVALIDDATA;\
296  }\
297  dst= tmp;
298 
299 static int decode_header(SnowContext *s){
300  int plane_index, tmp;
301  uint8_t kstate[32];
302 
303  memset(kstate, MID_STATE, sizeof(kstate));
304 
305  s->keyframe= get_rac(&s->c, kstate);
306  if(s->keyframe || s->always_reset){
307  ff_snow_reset_contexts(s);
308  s->spatial_decomposition_type=
309  s->qlog=
310  s->qbias=
311  s->mv_scale=
312  s->block_max_depth= 0;
313  }
314  if(s->keyframe){
315  GET_S(s->version, tmp <= 0U)
316  s->always_reset= get_rac(&s->c, s->header_state);
317  s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
318  s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
319  GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
320  s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
321  if (s->colorspace_type == 1) {
322  s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
323  s->nb_planes = 1;
324  } else if(s->colorspace_type == 0) {
325  s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
326  s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);
327 
328  if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
329  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
330  }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
331  s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
332  }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
333  s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
334  } else {
335  av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
336  s->chroma_h_shift = s->chroma_v_shift = 1;
337  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
338  return AVERROR_INVALIDDATA;
339  }
340  s->nb_planes = 3;
341  } else {
342  av_log(s, AV_LOG_ERROR, "unsupported color space\n");
343  s->chroma_h_shift = s->chroma_v_shift = 1;
344  s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
345  return AVERROR_INVALIDDATA;
346  }
347 
348 
349  s->spatial_scalability= get_rac(&s->c, s->header_state);
350 // s->rate_scalability= get_rac(&s->c, s->header_state);
351  GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
352  s->max_ref_frames++;
353 
354  decode_qlogs(s);
355  }
356 
357  if(!s->keyframe){
358  if(get_rac(&s->c, s->header_state)){
359  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
360  int htaps, i, sum=0;
361  Plane *p= &s->plane[plane_index];
362  p->diag_mc= get_rac(&s->c, s->header_state);
363  htaps= get_symbol(&s->c, s->header_state, 0);
364  if((unsigned)htaps >= HTAPS_MAX/2 - 1)
365  return AVERROR_INVALIDDATA;
366  htaps = htaps*2 + 2;
367  p->htaps= htaps;
368  for(i= htaps/2; i; i--){
369  unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
370  if (hcoeff > 127)
371  return AVERROR_INVALIDDATA;
372  p->hcoeff[i]= hcoeff * (1-2*(i&1));
373  sum += p->hcoeff[i];
374  }
375  p->hcoeff[0]= 32-sum;
376  }
377  s->plane[2].diag_mc= s->plane[1].diag_mc;
378  s->plane[2].htaps = s->plane[1].htaps;
379  memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
380  }
381  if(get_rac(&s->c, s->header_state)){
382  GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
383  decode_qlogs(s);
384  }
385  }
386 
387  s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
388  if(s->spatial_decomposition_type > 1U){
389  av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
390  return AVERROR_INVALIDDATA;
391  }
392  if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
393  s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
394  av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
395  return AVERROR_INVALIDDATA;
396  }
397  if (s->avctx->width > 65536-4) {
398  av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
399  return AVERROR_INVALIDDATA;
400  }
401 
402 
403  s->qlog += (unsigned)get_symbol(&s->c, s->header_state, 1);
404  s->mv_scale += (unsigned)get_symbol(&s->c, s->header_state, 1);
405  s->qbias += (unsigned)get_symbol(&s->c, s->header_state, 1);
406  s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
407  if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
408  av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
409  s->block_max_depth= 0;
410  s->mv_scale = 0;
411  return AVERROR_INVALIDDATA;
412  }
413  if (FFABS(s->qbias) > 127) {
414  av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
415  s->qbias = 0;
416  return AVERROR_INVALIDDATA;
417  }
418 
419  return 0;
420 }
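
The size check near the end of decode_header() rejects a spatial_decomposition_count that would leave the smallest (chroma) plane with at most one sample at the coarsest wavelet level. A standalone sketch of the same bound, ignoring the separate MAX_DECOMPOSITIONS cap:

    #include <stdio.h>

    static int max_decompositions(int width, int height, int chroma_h_shift, int chroma_v_shift)
    {
        int min_dim = width >> chroma_h_shift;
        if ((height >> chroma_v_shift) < min_dim)
            min_dim = height >> chroma_v_shift;

        int count = 1;
        while ((min_dim >> count) > 1)   /* mirrors: min_dim >> (count-1) <= 1 is invalid */
            count++;
        return count;
    }

    int main(void)
    {
        printf("352x288 4:2:0 -> %d levels max\n", max_decompositions(352, 288, 1, 1));
        printf("64x64   4:2:0 -> %d levels max\n", max_decompositions(64, 64, 1, 1));
        return 0;
    }
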
421 
422 static int decode_blocks(SnowContext *s){
423  int x, y;
424  int w= s->b_width;
425  int h= s->b_height;
426  int res;
427 
428  for(y=0; y<h; y++){
429  for(x=0; x<w; x++){
430  if (s->c.bytestream >= s->c.bytestream_end)
431  return AVERROR_INVALIDDATA;
432  if ((res = decode_q_branch(s, 0, x, y)) < 0)
433  return res;
434  }
435  }
436  return 0;
437 }
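
decode_blocks() walks the top-level block grid while decode_q_branch() recursively splits each block into four children until a leaf or block_max_depth (0 or 1 in Snow) is reached. A toy sketch of that descent, with a made-up rule standing in for the coded split bit read with get_rac():

    #include <stdio.h>

    static void descend(int level, int max_depth, int x, int y)
    {
        /* toy rule standing in for the coded "don't split" flag */
        int is_leaf = (level == max_depth) || ((x + y + level) & 1);

        if (is_leaf) {
            printf("leaf at level %d, block (%d,%d)\n", level, x, y);
            return;
        }
        descend(level + 1, max_depth, 2 * x + 0, 2 * y + 0);
        descend(level + 1, max_depth, 2 * x + 1, 2 * y + 0);
        descend(level + 1, max_depth, 2 * x + 0, 2 * y + 1);
        descend(level + 1, max_depth, 2 * x + 1, 2 * y + 1);
    }

    int main(void)
    {
        /* walk a 2x1 grid of top-level blocks with maximum depth 1 */
        for (int y = 0; y < 1; y++)
            for (int x = 0; x < 2; x++)
                descend(0, 1, x, y);
        return 0;
    }
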
438 
439 static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
440  int *got_frame, AVPacket *avpkt)
441 {
442  const uint8_t *buf = avpkt->data;
443  int buf_size = avpkt->size;
444  SnowContext *s = avctx->priv_data;
445  RangeCoder * const c= &s->c;
446  int bytes_read;
447  int level, orientation, plane_index;
448  int res;
449 
450  ff_init_range_decoder(c, buf, buf_size);
451  ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
452 
453  s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
454  if ((res = decode_header(s)) < 0)
455  return res;
456  if ((res=ff_snow_common_init_after_header(avctx)) < 0)
457  return res;
458 
459 // realloc slice buffer for the case that spatial_decomposition_count changed
460  ff_slice_buffer_destroy(&s->sb);
461  if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
462  (MB_SIZE >> s->block_max_depth) +
463  s->spatial_decomposition_count * 11 + 1,
464  s->plane[0].width,
465  s->spatial_idwt_buffer)) < 0)
466  return res;
467 
468  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
469  Plane *p= &s->plane[plane_index];
470  p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
471  && p->hcoeff[1]==-10
472  && p->hcoeff[2]==2;
473  }
474 
475  ff_snow_alloc_blocks(s);
476 
477  if((res = ff_snow_frame_start(s)) < 0)
478  return res;
479 
480  s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
481 
482  //keyframe flag duplication mess FIXME
483  if(avctx->debug&FF_DEBUG_PICT_INFO)
484  av_log(avctx, AV_LOG_ERROR,
485  "keyframe:%d qlog:%d qbias: %d mvscale: %d "
486  "decomposition_type:%d decomposition_count:%d\n",
487  s->keyframe, s->qlog, s->qbias, s->mv_scale,
488  s->spatial_decomposition_type,
489  s->spatial_decomposition_count
490  );
491 
492  if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
493  size_t size;
494  res = av_size_mult(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2), &size);
495  if (res)
496  return res;
497  av_fast_malloc(&s->avmv, &s->avmv_size, size);
498  if (!s->avmv)
499  return AVERROR(ENOMEM);
500  } else {
501  s->avmv_size = 0;
502  av_freep(&s->avmv);
503  }
504  s->avmv_index = 0;
505 
506  if ((res = decode_blocks(s)) < 0)
507  return res;
508 
509  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
510  Plane *p= &s->plane[plane_index];
511  int w= p->width;
512  int h= p->height;
513  int x, y;
514  int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */
515 
516  if(s->avctx->debug&2048){
517  memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
518  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
519 
520  for(y=0; y<h; y++){
521  for(x=0; x<w; x++){
522  int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
523  s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
524  }
525  }
526  }
527 
528  for(level=0; level<s->spatial_decomposition_count; level++){
529  for(orientation=level ? 1 : 0; orientation<4; orientation++){
530  SubBand *b= &p->band[level][orientation];
531  unpack_coeffs(s, b, b->parent, orientation);
532  }
533  }
534 
535  {
536  const int mb_h= s->b_height << s->block_max_depth;
537  const int block_size = MB_SIZE >> s->block_max_depth;
538  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
539  int mb_y;
540  DWTCompose cs[MAX_DECOMPOSITIONS];
541  int yd=0, yq=0;
542  int y;
543  int end_y;
544 
545  ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
546  for(mb_y=0; mb_y<=mb_h; mb_y++){
547 
548  int slice_starty = block_h*mb_y;
549  int slice_h = block_h*(mb_y+1);
550 
551  if (!(s->keyframe || s->avctx->debug&512)){
552  slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
553  slice_h -= (block_h >> 1);
554  }
555 
556  for(level=0; level<s->spatial_decomposition_count; level++){
557  for(orientation=level ? 1 : 0; orientation<4; orientation++){
558  SubBand *b= &p->band[level][orientation];
559  int start_y;
560  int end_y;
561  int our_mb_start = mb_y;
562  int our_mb_end = (mb_y + 1);
563  const int extra= 3;
564  start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
565  end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
566  if (!(s->keyframe || s->avctx->debug&512)){
567  start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
568  end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
569  }
570  start_y = FFMIN(b->height, start_y);
571  end_y = FFMIN(b->height, end_y);
572 
573  if (start_y != end_y){
574  if (orientation == 0){
575  SubBand * correlate_band = &p->band[0][0];
576  int correlate_end_y = FFMIN(b->height, end_y + 1);
577  int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
578  decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
579  correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
580  dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
581  }
582  else
583  decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
584  }
585  }
586  }
587 
588  for(; yd<slice_h; yd+=4){
589  ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
590  }
591 
592  if(s->qlog == LOSSLESS_QLOG){
593  for(; yq<slice_h && yq<h; yq++){
594  IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
595  for(x=0; x<w; x++){
596  line[x] *= 1<<FRAC_BITS;
597  }
598  }
599  }
600 
601  predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);
602 
603  y = FFMIN(p->height, slice_starty);
604  end_y = FFMIN(p->height, slice_h);
605  while(y < end_y)
606  ff_slice_buffer_release(&s->sb, y++);
607  }
608 
609  ff_slice_buffer_flush(&s->sb);
610  }
611 
612  }
613 
614  emms_c();
615 
616  ff_snow_release_buffer(avctx);
617 
618  if(!(s->avctx->debug&2048))
619  res = av_frame_ref(picture, s->current_picture);
620  else
621  res = av_frame_ref(picture, s->mconly_picture);
622  if (res >= 0 && s->avmv_index) {
623  AVFrameSideData *sd;
624 
625  sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
626  if (!sd)
627  return AVERROR(ENOMEM);
628  memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
629  }
630 
631  if (res < 0)
632  return res;
633 
634  *got_frame = 1;
635 
636  bytes_read= c->bytestream - c->bytestream_start;
637  if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME
638 
639  return bytes_read;
640 }
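
When the caller sets AV_CODEC_EXPORT_DATA_MVS, the vectors gathered in s->avmv above are attached to the output frame as AV_FRAME_DATA_MOTION_VECTORS side data. A consumer-side sketch, a helper to call on a decoded frame rather than part of this file:

    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/motion_vector.h>

    static void dump_motion_vectors(const AVFrame *frame)
    {
        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
        if (!sd)
            return;   /* nothing exported for this frame (e.g. intra-only) */

        const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
        size_t nb_mvs = sd->size / sizeof(*mvs);

        for (size_t i = 0; i < nb_mvs; i++)
            printf("%zu: dst (%d,%d) motion (%d,%d)/%u from source %d\n",
                   i, mvs[i].dst_x, mvs[i].dst_y,
                   mvs[i].motion_x, mvs[i].motion_y,
                   (unsigned)mvs[i].motion_scale, mvs[i].source);
    }
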
641 
642 static av_cold int decode_end(AVCodecContext *avctx)
643 {
644  SnowContext *s = avctx->priv_data;
645 
646  ff_slice_buffer_destroy(&s->sb);
647 
648  ff_snow_common_end(s);
649 
650  s->avmv_size = 0;
651  av_freep(&s->avmv);
652 
653  return 0;
654 }
655 
656 const FFCodec ff_snow_decoder = {
657  .p.name = "snow",
658  CODEC_LONG_NAME("Snow"),
659  .p.type = AVMEDIA_TYPE_VIDEO,
660  .p.id = AV_CODEC_ID_SNOW,
661  .priv_data_size = sizeof(SnowContext),
662  .init = ff_snow_common_init,
663  .close = decode_end,
664  FF_CODEC_DECODE_CB(decode_frame),
665  .p.capabilities = AV_CODEC_CAP_DR1,
666  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
667 };
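
The FFCodec table above is what exposes this file through the public libavcodec API under AV_CODEC_ID_SNOW. A usage sketch for opening the decoder and requesting the motion vectors exported by decode_frame(); error handling is trimmed and this is not part of snowdec.c:

    #include <libavcodec/avcodec.h>

    static AVCodecContext *open_snow_decoder(void)
    {
        const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SNOW);
        if (!codec)
            return NULL;

        AVCodecContext *avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return NULL;

        /* ask for the motion-vector side data attached in decode_frame() above */
        avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS;

        if (avcodec_open2(avctx, codec, NULL) < 0) {
            avcodec_free_context(&avctx);
            return NULL;
        }
        return avctx;
    }
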