FFmpeg: snowdec.c (Snow decoder)
/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "snow_dwt.h"
#include "internal.h"
#include "snow.h"

#include "rangecoder.h"
#include "mathops.h"

#include "h263.h"

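/* Run OBMC prediction for one row of macroblocks of a slice-buffered plane
 * and, when 'add' is set, write the reconstructed pixels to the current
 * picture. For keyframes (and debug mode 512) no motion compensation is
 * done: the buffered coefficients are just converted to 8-bit pixels (add)
 * or have the 128 DC offset subtracted in place (!add). With motion vector
 * export enabled, the luma block vectors are also recorded in s->avmv. */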
static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
//                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
                    line[x] -= 128 << FRAC_BITS;
//                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 1, sb, old_buffer, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 0, plane_index);
    }

    if(s->avmv && mb_y < mb_h && plane_index == 0)
        for(mb_x=0; mb_x<mb_w; mb_x++){
            AVMotionVector *avmv = s->avmv + s->avmv_index;
            const int b_width = s->b_width  << s->block_max_depth;
            const int b_stride= b_width;
            BlockNode *bn= &s->block[mb_x + mb_y*b_stride];

            if (bn->type)
                continue;

            s->avmv_index++;

            avmv->w = block_w;
            avmv->h = block_h;
            avmv->dst_x = block_w*mb_x - block_w/2;
            avmv->dst_y = block_h*mb_y - block_h/2;
            avmv->motion_scale = 8;
            avmv->motion_x = bn->mx * s->mv_scale;
            avmv->motion_y = bn->my * s->mv_scale;
            avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
            avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
            avmv->source= -1 - bn->ref;
            avmv->flags = 0;
        }
}

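/* Reconstruct the coefficients of one subband for the rows [start_y, h)
 * from the run-length list in b->x_coeff and store them, dequantized, into
 * the slice buffer. The read position is kept in save_state[0] so decoding
 * can resume on the next slice. */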
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

    if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
        qadd= 0;
        qmul= 1<<QEXPSHIFT;
    }

    /* If we are on the second or later slice, restore our index. */
    if (start_y != 0)
        new_index = save_state[0];


    for(y=start_y; y<h; y++){
        int x = 0;
        int v;
        IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
        memset(line, 0, b->width*sizeof(IDWTELEM));
        v = b->x_coeff[new_index].coeff;
        x = b->x_coeff[new_index++].x;
        while(x < w){
            register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
            register int u= -(v&1);
            line[x] = (t^u) - u;

            v = b->x_coeff[new_index].coeff;
            x = b->x_coeff[new_index++].x;
        }
    }

    /* Save our variables for the next slice. */
    save_state[0] = new_index;

    return;
}

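/* Recursively decode one node of the block quadtree: either a leaf carrying
 * intra colour or motion vector data, or a split into four children one
 * level deeper. Returns 0 on success or a negative error code. */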
static int decode_q_branch(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int res;

    if(s->keyframe){
        set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
        return 0;
    }

    if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
        int type, mx, my;
        int l = left->color[0];
        int cb= left->color[1];
        int cr= left->color[2];
        unsigned ref = 0;
        int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
        int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
        int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));

        type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
        if(type){
            int ld, cbd, crd;
            pred_mv(s, &mx, &my, 0, left, top, tr);
            ld = get_symbol(&s->c, &s->block_state[32], 1);
            if (ld < -255 || ld > 255) {
                return AVERROR_INVALIDDATA;
            }
            l += ld;
            if (s->nb_planes > 2) {
                cbd = get_symbol(&s->c, &s->block_state[64], 1);
                crd = get_symbol(&s->c, &s->block_state[96], 1);
                if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
                    return AVERROR_INVALIDDATA;
                }
                cb += cbd;
                cr += crd;
            }
        }else{
            if(s->ref_frames > 1)
                ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
            if (ref >= s->ref_frames) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
                return AVERROR_INVALIDDATA;
            }
            pred_mv(s, &mx, &my, ref, left, top, tr);
            mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
            my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
        }
        set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
    }else{
        if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
            return res;
    }
    return 0;
}

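/* Apply inverse quantization to a subband over the slice rows
 * [start_y, end_y); skipped entirely in lossless mode. */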
static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=start_y; y<end_y; y++){
//        DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            int i= line[x];
            if(i<0){
                line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                line[x]= (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
}

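/* Reverse the spatial decorrelation of a subband over the given slice rows
 * by adding each coefficient's left/top-neighbour prediction back (median or
 * gradient predictor, depending on use_median). The decoder applies this to
 * the LL0 band only. */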
static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
    const int w= b->width;
    int x,y;

    IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
    IDWTELEM * prev;

    if (start_y != 0)
        line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

    for(y=start_y; y<end_y; y++){
        prev = line;
//        line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            if(x){
                if(use_median){
                    if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
                    else  line[x] += line[x - 1];
                }else{
                    if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
                    else  line[x] += line[x - 1];
                }
            }else{
                if(y) line[x] += prev[x];
            }
        }
    }
}

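/* Read the per-subband quantizer logs from the header. The second chroma
 * plane reuses the values of the first one and orientation 2 reuses the
 * value read for orientation 1; everything else is coded explicitly. */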
static void decode_qlogs(SnowContext *s){
    int plane_index, level, orientation;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                int q;
                if     (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
                else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
                else                    q= get_symbol(&s->c, s->header_state, 1);
                s->plane[plane_index].band[level][orientation].qlog= q;
            }
        }
    }
}

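/* Read an unsigned header symbol into 'dst', returning AVERROR_INVALIDDATA
 * if it does not satisfy 'check'. */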
#define GET_S(dst, check) \
    tmp= get_symbol(&s->c, s->header_state, 0);\
    if(!(check)){\
        av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
        return AVERROR_INVALIDDATA;\
    }\
    dst= tmp;

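/* Parse the frame header: keyframe flag, the global stream parameters that
 * are only present on keyframes, optional half-pel filter and qlog updates
 * on inter frames, and the per-frame deltas of qlog, mv_scale, qbias and
 * block_max_depth. */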
static int decode_header(SnowContext *s){
    int plane_index, tmp;
    uint8_t kstate[32];

    memset(kstate, MID_STATE, sizeof(kstate));

    s->keyframe= get_rac(&s->c, kstate);
    if(s->keyframe || s->always_reset){
        ff_snow_reset_contexts(s);
        s->spatial_decomposition_type=
        s->qlog=
        s->qbias=
        s->mv_scale=
        s->block_max_depth= 0;
    }
    if(s->keyframe){
        GET_S(s->version, tmp <= 0U)
        s->always_reset= get_rac(&s->c, s->header_state);
        s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
        s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
        GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
        s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
        if (s->colorspace_type == 1) {
            s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
            s->nb_planes = 1;
        } else if(s->colorspace_type == 0) {
            s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
            s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);

            if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
            }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
            } else {
                av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
                s->chroma_h_shift = s->chroma_v_shift = 1;
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
                return AVERROR_INVALIDDATA;
            }
            s->nb_planes = 3;
        } else {
            av_log(s, AV_LOG_ERROR, "unsupported color space\n");
            s->chroma_h_shift = s->chroma_v_shift = 1;
            s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            return AVERROR_INVALIDDATA;
        }


        s->spatial_scalability= get_rac(&s->c, s->header_state);
//        s->rate_scalability= get_rac(&s->c, s->header_state);
        GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
        s->max_ref_frames++;

        decode_qlogs(s);
    }

    if(!s->keyframe){
        if(get_rac(&s->c, s->header_state)){
            for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
                int htaps, i, sum=0;
                Plane *p= &s->plane[plane_index];
                p->diag_mc= get_rac(&s->c, s->header_state);
                htaps= get_symbol(&s->c, s->header_state, 0);
                if((unsigned)htaps >= HTAPS_MAX/2 - 1)
                    return AVERROR_INVALIDDATA;
                htaps = htaps*2 + 2;
                p->htaps= htaps;
                for(i= htaps/2; i; i--){
                    unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
                    if (hcoeff > 127)
                        return AVERROR_INVALIDDATA;
                    p->hcoeff[i]= hcoeff * (1-2*(i&1));
                    sum += p->hcoeff[i];
                }
                p->hcoeff[0]= 32-sum;
            }
            s->plane[2].diag_mc= s->plane[1].diag_mc;
            s->plane[2].htaps = s->plane[1].htaps;
            memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
        }
        if(get_rac(&s->c, s->header_state)){
            GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
            decode_qlogs(s);
        }
    }

    s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->spatial_decomposition_type > 1U){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
        return AVERROR_INVALIDDATA;
    }
    if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
             s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
        return AVERROR_INVALIDDATA;
    }
    if (s->avctx->width > 65536-4) {
        av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
        return AVERROR_INVALIDDATA;
    }


    s->qlog           += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->mv_scale       += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->qbias          += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
        av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
        s->block_max_depth= 0;
        s->mv_scale = 0;
        return AVERROR_INVALIDDATA;
    }
    if (FFABS(s->qbias) > 127) {
        av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
        s->qbias = 0;
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

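/* Decode the block tree for the whole frame, one top-level block at a time. */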
static int decode_blocks(SnowContext *s){
    int x, y;
    int w= s->b_width;
    int h= s->b_height;
    int res;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            if (s->c.bytestream >= s->c.bytestream_end)
                return AVERROR_INVALIDDATA;
            if ((res = decode_q_branch(s, 0, x, y)) < 0)
                return res;
        }
    }
    return 0;
}

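/* Decode one Snow frame: parse the header, decode the block tree, then for
 * each plane unpack and dequantize the subband coefficients slice by slice,
 * run the buffered inverse DWT and OBMC prediction, and finally return the
 * decoded picture (plus motion vectors as side data when requested). */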
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SnowContext *s = avctx->priv_data;
    RangeCoder * const c= &s->c;
    int bytes_read;
    AVFrame *picture = data;
    int level, orientation, plane_index;
    int res;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
    if ((res = decode_header(s)) < 0)
        return res;
    if ((res=ff_snow_common_init_after_header(avctx)) < 0)
        return res;

    // realloc slice buffer for the case that spatial_decomposition_count changed
    ff_slice_buffer_destroy(&s->sb);
    if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
                                    (MB_SIZE >> s->block_max_depth) +
                                    s->spatial_decomposition_count * 11 + 1,
                                    s->plane[0].width,
                                    s->spatial_idwt_buffer)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
                                              && p->hcoeff[1]==-10
                                              && p->hcoeff[2]==2;
    }

    ff_snow_alloc_blocks(s);

    if((res = ff_snow_frame_start(s)) < 0)
        return res;

    s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    //keyframe flag duplication mess FIXME
    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR,
               "keyframe:%d qlog:%d qbias: %d mvscale: %d "
               "decomposition_type:%d decomposition_count:%d\n",
               s->keyframe, s->qlog, s->qbias, s->mv_scale,
               s->spatial_decomposition_type,
               s->spatial_decomposition_count
              );

    if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
        size_t size;
        res = av_size_mult(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2), &size);
        if (res)
            return res;
        av_fast_malloc(&s->avmv, &s->avmv_size, size);
        if (!s->avmv)
            return AVERROR(ENOMEM);
    } else {
        s->avmv_size = 0;
        av_freep(&s->avmv);
    }
    s->avmv_index = 0;

    if ((res = decode_blocks(s)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        int w= p->width;
        int h= p->height;
        int x, y;
        int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */

        if(s->avctx->debug&2048){
            memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
            predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);

            for(y=0; y<h; y++){
                for(x=0; x<w; x++){
                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                }
            }
        }

        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1 : 0; orientation<4; orientation++){
                SubBand *b= &p->band[level][orientation];
                unpack_coeffs(s, b, b->parent, orientation);
            }
        }

        {
            const int mb_h= s->b_height << s->block_max_depth;
            const int block_size = MB_SIZE >> s->block_max_depth;
            const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
            int mb_y;
            DWTCompose cs[MAX_DECOMPOSITIONS];
            int yd=0, yq=0;
            int y;
            int end_y;

            ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
            for(mb_y=0; mb_y<=mb_h; mb_y++){

                int slice_starty = block_h*mb_y;
                int slice_h = block_h*(mb_y+1);

                if (!(s->keyframe || s->avctx->debug&512)){
                    slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
                    slice_h -= (block_h >> 1);
                }

                for(level=0; level<s->spatial_decomposition_count; level++){
                    for(orientation=level ? 1 : 0; orientation<4; orientation++){
                        SubBand *b= &p->band[level][orientation];
                        int start_y;
                        int end_y;
                        int our_mb_start = mb_y;
                        int our_mb_end = (mb_y + 1);
                        const int extra= 3;
                        start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
                        end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
                        if (!(s->keyframe || s->avctx->debug&512)){
                            start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                            end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                        }
                        start_y = FFMIN(b->height, start_y);
                        end_y = FFMIN(b->height, end_y);

                        if (start_y != end_y){
                            if (orientation == 0){
                                SubBand * correlate_band = &p->band[0][0];
                                int correlate_end_y = FFMIN(b->height, end_y + 1);
                                int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
                                decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
                                correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
                                dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
                            }
                            else
                                decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
                        }
                    }
                }

                for(; yd<slice_h; yd+=4){
                    ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
                }

                if(s->qlog == LOSSLESS_QLOG){
                    for(; yq<slice_h && yq<h; yq++){
                        IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
                        for(x=0; x<w; x++){
                            line[x] *= 1<<FRAC_BITS;
                        }
                    }
                }

                predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);

                y = FFMIN(p->height, slice_starty);
                end_y = FFMIN(p->height, slice_h);
                while(y < end_y)
                    ff_slice_buffer_release(&s->sb, y++);
            }

            ff_slice_buffer_flush(&s->sb);
        }

    }

    emms_c();

    ff_snow_release_buffer(avctx);

    if(!(s->avctx->debug&2048))
        res = av_frame_ref(picture, s->current_picture);
    else
        res = av_frame_ref(picture, s->mconly_picture);
    if (res >= 0 && s->avmv_index) {
        AVFrameSideData *sd;

        sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
        if (!sd)
            return AVERROR(ENOMEM);
        memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
    }

    if (res < 0)
        return res;

    *got_frame = 1;

    bytes_read= c->bytestream - c->bytestream_start;
    if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME

    return bytes_read;
}

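/* Free the slice buffer, the shared Snow state and the exported motion
 * vector array. */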
static av_cold int decode_end(AVCodecContext *avctx)
{
    SnowContext *s = avctx->priv_data;

    ff_slice_buffer_destroy(&s->sb);

    ff_snow_common_end(s);

    s->avmv_size = 0;
    av_freep(&s->avmv);

    return 0;
}

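/* Decoder entry exported to libavcodec. */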
const AVCodec ff_snow_decoder = {
    .name           = "snow",
    .long_name      = NULL_IF_CONFIG_SMALL("Snow"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SNOW,
    .priv_data_size = sizeof(SnowContext),
    .init           = ff_snow_common_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};