snowenc.c
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "packet_internal.h"
34 #include "qpeldsp.h"
35 #include "snow_dwt.h"
36 #include "snow.h"
37 
38 #include "rangecoder.h"
39 #include "mathops.h"
40 
41 #include "mpegvideo.h"
42 #include "h263enc.h"
43 
44 #define FF_ME_ITER 3
45 
46 typedef struct SnowEncContext {
47  SnowContext com;
48  QpelDSPContext qdsp;
49  MpegvideoEncDSPContext mpvencdsp;
50 
51  int lambda;
52  int lambda2;
53  int pass1_rc;
54 
55  int pred;
56  int memc_only;
57  int no_bitstream;
58  int intra_penalty;
59  int motion_est;
60  int iterative_dia_size;
61  int scenechange_threshold;
62 
63  MECmpContext mecc;
64  MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
65 #define ME_CACHE_SIZE 1024
66  unsigned me_cache[ME_CACHE_SIZE];
67  unsigned me_cache_generation;
68 
70 } SnowEncContext;
71 
72 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
73  uint8_t *const ref[3], uint8_t *const ref2[3],
74  int x, int y, int ref_index)
75 {
76  SnowContext *s = c->avctx->priv_data;
77  const int offset[3] = {
78  y*c-> stride + x,
79  ((y*c->uvstride + x) >> s->chroma_h_shift),
80  ((y*c->uvstride + x) >> s->chroma_h_shift),
81  };
82  for (int i = 0; i < 3; i++) {
83  c->src[0][i] = src [i];
84  c->ref[0][i] = ref [i] + offset[i];
85  }
86  av_assert2(!ref_index);
87 }
88 
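/*
 * Write a value with an adaptive exp-Golomb-like code driven by the range coder:
 * a zero/non-zero flag (state 0), the exponent e = log2(|v|) in unary using
 * states 1..10 (clamped at 10), the bits of |v| below the leading one, MSB
 * first, using states 22..31, and finally the sign using states 11..21.
 * Example: v = +5 (|v| = 101b, e = 2) is coded as 0 (non-zero), then 1, 1, 0
 * for the exponent, then the mantissa bits 0, 1, then a 0 sign bit.
 */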
89 static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
90 {
91  if (v) {
92  const int a = FFABS(v);
93  const int e = av_log2(a);
94  const int el = FFMIN(e, 10);
95  int i;
96 
97  put_rac(c, state + 0, 0);
98 
99  for (i = 0; i < el; i++)
100  put_rac(c, state + 1 + i, 1); //1..10
101  for(; i < e; i++)
102  put_rac(c, state + 1 + 9, 1); //1..10
103  put_rac(c, state + 1 + FFMIN(i, 9), 0);
104 
105  for (i = e - 1; i >= el; i--)
106  put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
107  for(; i >= 0; i--)
108  put_rac(c, state + 22 + i, (a >> i) & 1); //22..31
109 
110  if (is_signed)
111  put_rac(c, state + 11 + el, v < 0); //11..21
112  } else {
113  put_rac(c, state + 0, 1);
114  }
115 }
116 
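/*
 * Write a non-negative value as a sequence of adaptively coded escape bits
 * followed by raw bits: while v does not fit in the current scale an escape
 * bit is sent, v is reduced and log2 grows (doubling the scale once log2 is
 * positive); the remaining log2 bits of v are then coded directly. Larger
 * initial log2 values favour larger symbols (run lengths, magnitudes).
 */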
117 static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
118 {
119  int r = log2 >= 0 ? 1<<log2 : 1;
120 
121  av_assert2(v >= 0);
122  av_assert2(log2 >= -4);
123 
124  while (v >= r) {
125  put_rac(c, state + 4 + log2, 1);
126  v -= r;
127  log2++;
128  if (log2 > 0) r += r;
129  }
130  put_rac(c, state + 4 + log2, 0);
131 
132  for (int i = log2 - 1; i >= 0; i--)
133  put_rac(c, state + 31 - i, (v >> i) & 1);
134 }
135 
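/*
 * Allocate the input frame oversized by EDGE_WIDTH pixels on every side and
 * shift the data[] pointers to the visible area, so that motion estimation
 * and OBMC may read past the picture borders without per-pixel checks.
 */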
136 static int get_encode_buffer(SnowContext *s, AVFrame *frame)
137 {
138  int ret;
139 
140  frame->width = s->avctx->width + 2 * EDGE_WIDTH;
141  frame->height = s->avctx->height + 2 * EDGE_WIDTH;
142 
143  ret = ff_encode_alloc_frame(s->avctx, frame);
144  if (ret < 0)
145  return ret;
146  for (int i = 0; frame->data[i]; i++) {
147  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
148  frame->linesize[i] +
149  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
150  frame->data[i] += offset;
151  }
152  frame->width = s->avctx->width;
153  frame->height = s->avctx->height;
154 
155  return 0;
156 }
157 
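/*
 * Encoder init: pick the wavelet from -pred (the 9/7 wavelet is refused in
 * lossless mode), derive MV precision and 4MV block depth from the codec
 * flags, set the default 6-tap MC filter, route the qpel MC tables to the
 * H.264 quarter-pel routines, and set up the borrowed MpegEncContext
 * (scratch buffers, ME maps, mv_penalty, ratecontrol) plus the edge-padded
 * input picture and the per-reference MV/score arrays for iterative ME.
 */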
158 static av_cold int encode_init(AVCodecContext *avctx)
159 {
160  SnowEncContext *const enc = avctx->priv_data;
161  SnowContext *const s = &enc->com;
162  MpegEncContext *const mpv = &enc->m;
163  int plane_index, ret;
164  int i;
165 
166  if (enc->pred == DWT_97
167  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
168  && avctx->global_quality == 0){
169  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
170  return AVERROR(EINVAL);
171  }
172 
173  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type or transform_type
174 
175  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
176  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
177 
178  for(plane_index=0; plane_index<3; plane_index++){
179  s->plane[plane_index].diag_mc= 1;
180  s->plane[plane_index].htaps= 6;
181  s->plane[plane_index].hcoeff[0]= 40;
182  s->plane[plane_index].hcoeff[1]= -10;
183  s->plane[plane_index].hcoeff[2]= 2;
184  s->plane[plane_index].fast_mc= 1;
185  }
186 
187  // Must be before ff_snow_common_init()
188  ff_hpeldsp_init(&s->hdsp, avctx->flags);
189  if ((ret = ff_snow_common_init(avctx)) < 0) {
190  return ret;
191  }
192 
193 #define mcf(dx,dy)\
194  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
195  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
196  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
197  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
198  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
199  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
200 
201  mcf( 0, 0)
202  mcf( 4, 0)
203  mcf( 8, 0)
204  mcf(12, 0)
205  mcf( 0, 4)
206  mcf( 4, 4)
207  mcf( 8, 4)
208  mcf(12, 4)
209  mcf( 0, 8)
210  mcf( 4, 8)
211  mcf( 8, 8)
212  mcf(12, 8)
213  mcf( 0,12)
214  mcf( 4,12)
215  mcf( 8,12)
216  mcf(12,12)
217 
218  ff_me_cmp_init(&enc->mecc, avctx);
219  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
220 
222 
223  s->version=0;
224 
225  mpv->avctx = avctx;
226  mpv->bit_rate= avctx->bit_rate;
227  mpv->lmin = avctx->mb_lmin;
228  mpv->lmax = avctx->mb_lmax;
229  mpv->mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
230 
231  mpv->me.temp =
232  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
233  mpv->sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
234  mpv->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*mpv->me.map));
235  if (!mpv->me.scratchpad || !mpv->me.map || !mpv->sc.obmc_scratchpad)
236  return AVERROR(ENOMEM);
237  mpv->me.score_map = mpv->me.map + ME_MAP_SIZE;
238 
239  ff_h263_encode_init(mpv); //mv_penalty
240 
241  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
242 
243  if(avctx->flags&AV_CODEC_FLAG_PASS1){
244  if(!avctx->stats_out)
245  avctx->stats_out = av_mallocz(256);
246 
247  if (!avctx->stats_out)
248  return AVERROR(ENOMEM);
249  }
250  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
251  ret = ff_rate_control_init(mpv);
252  if(ret < 0)
253  return ret;
254  }
256 
257  switch(avctx->pix_fmt){
258  case AV_PIX_FMT_YUV444P:
259 // case AV_PIX_FMT_YUV422P:
260  case AV_PIX_FMT_YUV420P:
261 // case AV_PIX_FMT_YUV411P:
262  case AV_PIX_FMT_YUV410P:
263  s->nb_planes = 3;
264  s->colorspace_type= 0;
265  break;
266  case AV_PIX_FMT_GRAY8:
267  s->nb_planes = 1;
268  s->colorspace_type = 1;
269  break;
270 /* case AV_PIX_FMT_RGB32:
271  s->colorspace= 1;
272  break;*/
273  }
274 
275  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
276  &s->chroma_v_shift);
277  if (ret)
278  return ret;
279 
280  ret = ff_set_cmp(&enc->mecc, enc->mecc.me_cmp, s->avctx->me_cmp);
281  ret |= ff_set_cmp(&enc->mecc, enc->mecc.me_sub_cmp, s->avctx->me_sub_cmp);
282  if (ret < 0)
283  return AVERROR(EINVAL);
284 
285  s->input_picture = av_frame_alloc();
286  if (!s->input_picture)
287  return AVERROR(ENOMEM);
288 
289  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
290  return ret;
291 
292  if (enc->motion_est == FF_ME_ITER) {
293  int size= s->b_width * s->b_height << 2*s->block_max_depth;
294  for(i=0; i<s->max_ref_frames; i++){
295  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
296  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
297  if (!s->ref_mvs[i] || !s->ref_scores[i])
298  return AVERROR(ENOMEM);
299  }
300  }
301 
302  return 0;
303 }
304 
305 //near copy & paste from dsputil, FIXME
306 static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
307 {
308  int s, i, j;
309 
310  s = 0;
311  for (i = 0; i < h; i++) {
312  for (j = 0; j < w; j++) {
313  s += pix[0];
314  pix ++;
315  }
316  pix += line_size - w;
317  }
318  return s;
319 }
320 
321 //near copy & paste from dsputil, FIXME
322 static int pix_norm1(const uint8_t * pix, int line_size, int w)
323 {
324  int s, i, j;
325  const uint32_t *sq = ff_square_tab + 256;
326 
327  s = 0;
328  for (i = 0; i < w; i++) {
329  for (j = 0; j < w; j ++) {
330  s += sq[pix[0]];
331  pix ++;
332  }
333  pix += line_size - w;
334  }
335  return s;
336 }
337 
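/*
 * Scale the Lagrange multiplier to the range of the selected comparison
 * function: SAD-like metrics use lambda, SSE-like metrics use lambda2.
 */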
338 static inline int get_penalty_factor(int lambda, int lambda2, int type){
339  switch(type&0xFF){
340  default:
341  case FF_CMP_SAD:
342  return lambda>>FF_LAMBDA_SHIFT;
343  case FF_CMP_DCT:
344  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
345  case FF_CMP_W53:
346  return (4*lambda)>>(FF_LAMBDA_SHIFT);
347  case FF_CMP_W97:
348  return (2*lambda)>>(FF_LAMBDA_SHIFT);
349  case FF_CMP_SATD:
350  case FF_CMP_DCT264:
351  return (2*lambda)>>FF_LAMBDA_SHIFT;
352  case FF_CMP_RD:
353  case FF_CMP_PSNR:
354  case FF_CMP_SSE:
355  case FF_CMP_NSSE:
356  return lambda2>>FF_LAMBDA_SHIFT;
357  case FF_CMP_BIT:
358  return 1;
359  }
360 }
361 
362 //FIXME copy&paste
363 #define P_LEFT P[1]
364 #define P_TOP P[2]
365 #define P_TOPRIGHT P[3]
366 #define P_MEDIAN P[4]
367 #define P_MV1 P[9]
368 #define FLAG_QPEL 1 //must be 1
369 
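/*
 * Recursive quadtree mode decision for one block: run EPZS plus subpel motion
 * search against every reference frame, tentatively code the block as inter
 * and as intra (DC colour) into scratch range coders, add the lambda2-weighted
 * rate of each candidate to its distortion, recurse to price a 4-way split,
 * and commit whichever of split / intra / inter is cheapest by copying the
 * corresponding coder state back into s->c.
 */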
370 static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
371 {
372  SnowContext *const s = &enc->com;
373  MotionEstContext *const c = &enc->m.me;
374  uint8_t p_buffer[1024];
375  uint8_t i_buffer[1024];
376  uint8_t p_state[sizeof(s->block_state)];
377  uint8_t i_state[sizeof(s->block_state)];
378  RangeCoder pc, ic;
379  uint8_t *pbbak= s->c.bytestream;
380  uint8_t *pbbak_start= s->c.bytestream_start;
381  int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
382  const int w= s->b_width << s->block_max_depth;
383  const int h= s->b_height << s->block_max_depth;
384  const int rem_depth= s->block_max_depth - level;
385  const int index= (x + y*w) << rem_depth;
386  const int block_w= 1<<(LOG2_MB_SIZE - level);
387  int trx= (x+1)<<rem_depth;
388  int try= (y+1)<<rem_depth;
389  const BlockNode *left = x ? &s->block[index-1] : &null_block;
390  const BlockNode *top = y ? &s->block[index-w] : &null_block;
391  const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
392  const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
393  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
394  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
395  int pl = left->color[0];
396  int pcb= left->color[1];
397  int pcr= left->color[2];
398  int pmx, pmy;
399  int mx=0, my=0;
400  int l,cr,cb;
401  const int stride= s->current_picture->linesize[0];
402  const int uvstride= s->current_picture->linesize[1];
403  const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y* stride)*block_w,
404  s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
405  s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
406  int P[10][2];
407  int16_t last_mv[3][2];
408  int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
409  const int shift= 1+qpel;
410  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
411  int mx_context= av_log2(2*FFABS(left->mx - top->mx));
412  int my_context= av_log2(2*FFABS(left->my - top->my));
413  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
414  int ref, best_ref, ref_score, ref_mx, ref_my;
415 
416  av_assert0(sizeof(s->block_state) >= 256);
417  if(s->keyframe){
418  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
419  return 0;
420  }
421 
422 // clip predictors / edge ?
423 
424  P_LEFT[0]= left->mx;
425  P_LEFT[1]= left->my;
426  P_TOP [0]= top->mx;
427  P_TOP [1]= top->my;
428  P_TOPRIGHT[0]= tr->mx;
429  P_TOPRIGHT[1]= tr->my;
430 
431  last_mv[0][0]= s->block[index].mx;
432  last_mv[0][1]= s->block[index].my;
433  last_mv[1][0]= right->mx;
434  last_mv[1][1]= right->my;
435  last_mv[2][0]= bottom->mx;
436  last_mv[2][1]= bottom->my;
437 
438  enc->m.mb_stride = 2;
439  enc->m.mb_x =
440  enc->m.mb_y = 0;
441  c->skip= 0;
442 
443  av_assert1(c-> stride == stride);
444  av_assert1(c->uvstride == uvstride);
445 
446  c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
447  c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
448  c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
449  c->current_mv_penalty = c->mv_penalty[enc->m.f_code=1] + MAX_DMV;
450 
451  c->xmin = - x*block_w - 16+3;
452  c->ymin = - y*block_w - 16+3;
453  c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
454  c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
455 
456  if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
457  if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
458  if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
459  if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
460  if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
461  if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
462  if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
463 
464  P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
465  P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
466 
467  if (!y) {
468  c->pred_x= P_LEFT[0];
469  c->pred_y= P_LEFT[1];
470  } else {
471  c->pred_x = P_MEDIAN[0];
472  c->pred_y = P_MEDIAN[1];
473  }
474 
475  score= INT_MAX;
476  best_ref= 0;
477  for(ref=0; ref<s->ref_frames; ref++){
478  init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
479 
480  ref_score= ff_epzs_motion_search(&enc->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
481  (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
482 
483  av_assert2(ref_mx >= c->xmin);
484  av_assert2(ref_mx <= c->xmax);
485  av_assert2(ref_my >= c->ymin);
486  av_assert2(ref_my <= c->ymax);
487 
488  ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
489  ref_score= ff_get_mb_score(&enc->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
490  ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
491  if(s->ref_mvs[ref]){
492  s->ref_mvs[ref][index][0]= ref_mx;
493  s->ref_mvs[ref][index][1]= ref_my;
494  s->ref_scores[ref][index]= ref_score;
495  }
496  if(score > ref_score){
497  score= ref_score;
498  best_ref= ref;
499  mx= ref_mx;
500  my= ref_my;
501  }
502  }
503  //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2
504 
505  // subpel search
506  base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
507  pc= s->c;
508  pc.bytestream_start=
509  pc.bytestream= p_buffer; //FIXME end/start? and at the others too
510  memcpy(p_state, s->block_state, sizeof(s->block_state));
511 
512  if(level!=s->block_max_depth)
513  put_rac(&pc, &p_state[4 + s_context], 1);
514  put_rac(&pc, &p_state[1 + left->type + top->type], 0);
515  if(s->ref_frames > 1)
516  put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
517  pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
518  put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
519  put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
520  p_len= pc.bytestream - pc.bytestream_start;
521  score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;
522 
523  block_s= block_w*block_w;
524  sum = pix_sum(current_data[0], stride, block_w, block_w);
525  l= (sum + block_s/2)/block_s;
526  iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
527 
528  if (s->nb_planes > 2) {
529  block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
530  sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
531  cb= (sum + block_s/2)/block_s;
532  // iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
533  sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
534  cr= (sum + block_s/2)/block_s;
535  // iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
536  }else
537  cb = cr = 0;
538 
539  ic= s->c;
540  ic.bytestream_start=
541  ic.bytestream= i_buffer; //FIXME end/start? and at the others too
542  memcpy(i_state, s->block_state, sizeof(s->block_state));
543  if(level!=s->block_max_depth)
544  put_rac(&ic, &i_state[4 + s_context], 1);
545  put_rac(&ic, &i_state[1 + left->type + top->type], 1);
546  put_symbol(&ic, &i_state[32], l-pl , 1);
547  if (s->nb_planes > 2) {
548  put_symbol(&ic, &i_state[64], cb-pcb, 1);
549  put_symbol(&ic, &i_state[96], cr-pcr, 1);
550  }
551  i_len= ic.bytestream - ic.bytestream_start;
552  iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;
553 
554  av_assert1(iscore < 255*255*256 + enc->lambda2*10);
555  av_assert1(iscore >= 0);
556  av_assert1(l>=0 && l<=255);
557  av_assert1(pl>=0 && pl<=255);
558 
559  if(level==0){
560  int varc= iscore >> 8;
561  int vard= score >> 8;
562  if (vard <= 64 || vard < varc)
563  c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
564  else
565  c->scene_change_score += enc->m.qscale;
566  }
567 
568  if(level!=s->block_max_depth){
569  put_rac(&s->c, &s->block_state[4 + s_context], 0);
570  score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
571  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
572  score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
573  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
574  score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead
575 
576  if(score2 < score && score2 < iscore)
577  return score2;
578  }
579 
580  if(iscore < score){
581  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
582  memcpy(pbbak, i_buffer, i_len);
583  s->c= ic;
584  s->c.bytestream_start= pbbak_start;
585  s->c.bytestream= pbbak + i_len;
586  set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
587  memcpy(s->block_state, i_state, sizeof(s->block_state));
588  return iscore;
589  }else{
590  memcpy(pbbak, p_buffer, p_len);
591  s->c= pc;
592  s->c.bytestream_start= pbbak_start;
593  s->c.bytestream= pbbak + p_len;
594  set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
595  memcpy(s->block_state, p_state, sizeof(s->block_state));
596  return score;
597  }
598 }
599 
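/*
 * Second coding pass: the block decisions already exist (for example from
 * iterative_me), so only the quadtree, intra colours and MV residuals are
 * written into the real bitstream, without any searching.
 */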
600 static void encode_q_branch2(SnowContext *s, int level, int x, int y){
601  const int w= s->b_width << s->block_max_depth;
602  const int rem_depth= s->block_max_depth - level;
603  const int index= (x + y*w) << rem_depth;
604  int trx= (x+1)<<rem_depth;
605  BlockNode *b= &s->block[index];
606  const BlockNode *left = x ? &s->block[index-1] : &null_block;
607  const BlockNode *top = y ? &s->block[index-w] : &null_block;
608  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
609  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
610  int pl = left->color[0];
611  int pcb= left->color[1];
612  int pcr= left->color[2];
613  int pmx, pmy;
614  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
615  int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
616  int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
617  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
618 
619  if(s->keyframe){
620  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
621  return;
622  }
623 
624  if(level!=s->block_max_depth){
625  if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
626  put_rac(&s->c, &s->block_state[4 + s_context], 1);
627  }else{
628  put_rac(&s->c, &s->block_state[4 + s_context], 0);
629  encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
630  encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
631  encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
632  encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
633  return;
634  }
635  }
636  if(b->type & BLOCK_INTRA){
637  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
638  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
639  put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
640  if (s->nb_planes > 2) {
641  put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
642  put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
643  }
644  set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
645  }else{
646  pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
647  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
648  if(s->ref_frames > 1)
649  put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
650  put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
651  put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
652  set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
653  }
654 }
655 
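/*
 * Least-squares estimate of the intra DC value for one block: with the block
 * forced to intra black, accumulate the OBMC-weighted difference between the
 * source and what the surrounding blocks already contribute (ab) together
 * with the squared OBMC weights (aa), and return clip(ab * OBMC_MAX / aa).
 */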
656 static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
657 {
658  SnowContext *const s = &enc->com;
659  int i, x2, y2;
660  Plane *p= &s->plane[plane_index];
661  const int block_size = MB_SIZE >> s->block_max_depth;
662  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
663  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
664  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
665  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
666  const int ref_stride= s->current_picture->linesize[plane_index];
667  const uint8_t *src = s->input_picture->data[plane_index];
668  IDWTELEM *dst= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
669  const int b_stride = s->b_width << s->block_max_depth;
670  const int w= p->width;
671  const int h= p->height;
672  int index= mb_x + mb_y*b_stride;
673  BlockNode *b= &s->block[index];
674  BlockNode backup= *b;
675  int ab=0;
676  int aa=0;
677 
678  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above
679 
680  b->type|= BLOCK_INTRA;
681  b->color[plane_index]= 0;
682  memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
683 
684  for(i=0; i<4; i++){
685  int mb_x2= mb_x + (i &1) - 1;
686  int mb_y2= mb_y + (i>>1) - 1;
687  int x= block_w*mb_x2 + block_w/2;
688  int y= block_h*mb_y2 + block_h/2;
689 
690  add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
691  x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
692 
693  for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
694  for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
695  int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
696  int obmc_v= obmc[index];
697  int d;
698  if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
699  if(x<0) obmc_v += obmc[index + block_w];
700  if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
701  if(x+block_w>w) obmc_v += obmc[index - block_w];
702  //FIXME precalculate this or simplify it somehow else
703 
704  d = -dst[index] + (1<<(FRAC_BITS-1));
705  dst[index] = d;
706  ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
707  aa += obmc_v * obmc_v; //FIXME precalculate this
708  }
709  }
710  }
711  *b= backup;
712 
713  return av_clip_uint8( ROUNDED_DIV(ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
714 }
715 
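/*
 * Cheap bit-cost estimate for one block, used by the RD search instead of
 * running the range coder: roughly 2*log2 of the colour deltas for intra
 * blocks, or 2*log2 of the MV residual and reference index for inter blocks.
 */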
716 static inline int get_block_bits(SnowContext *s, int x, int y, int w){
717  const int b_stride = s->b_width << s->block_max_depth;
718  const int b_height = s->b_height<< s->block_max_depth;
719  int index= x + y*b_stride;
720  const BlockNode *b = &s->block[index];
721  const BlockNode *left = x ? &s->block[index-1] : &null_block;
722  const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
723  const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
724  const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
725  int dmx, dmy;
726 // int mx_context= av_log2(2*FFABS(left->mx - top->mx));
727 // int my_context= av_log2(2*FFABS(left->my - top->my));
728 
729  if(x<0 || x>=b_stride || y>=b_height)
730  return 0;
731 /*
732 1 0 0
733 01X 1-2 1
734 001XX 3-6 2-3
735 0001XXX 7-14 4-7
736 00001XXXX 15-30 8-15
737 */
738 //FIXME try accurate rate
739 //FIXME intra and inter predictors if surrounding blocks are not the same type
740  if(b->type & BLOCK_INTRA){
741  return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
742  + av_log2(2*FFABS(left->color[1] - b->color[1]))
743  + av_log2(2*FFABS(left->color[2] - b->color[2])));
744  }else{
745  pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
746  dmx-= b->mx;
747  dmy-= b->my;
748  return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
749  + av_log2(2*FFABS(dmy))
750  + av_log2(2*b->ref));
751  }
752 }
753 
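/*
 * Rate-distortion cost of one block: reconstruct the doubled OBMC area into
 * the current picture (with edge-corrected OBMC weights), measure distortion
 * with the configured comparison function over that area, and add the
 * estimated bits of this block and the neighbours it influences, scaled by
 * the penalty factor.
 */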
754 static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
755  int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
756 {
757  SnowContext *const s = &enc->com;
758  Plane *p= &s->plane[plane_index];
759  const int block_size = MB_SIZE >> s->block_max_depth;
760  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
761  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
762  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
763  const int ref_stride= s->current_picture->linesize[plane_index];
764  uint8_t *dst= s->current_picture->data[plane_index];
765  const uint8_t *src = s->input_picture->data[plane_index];
766  IDWTELEM *pred= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
767  uint8_t *cur = s->scratchbuf;
768  uint8_t *tmp = s->emu_edge_buffer;
769  const int b_stride = s->b_width << s->block_max_depth;
770  const int b_height = s->b_height<< s->block_max_depth;
771  const int w= p->width;
772  const int h= p->height;
773  int distortion;
774  int rate= 0;
775  const int penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
776  int sx= block_w*mb_x - block_w/2;
777  int sy= block_h*mb_y - block_h/2;
778  int x0= FFMAX(0,-sx);
779  int y0= FFMAX(0,-sy);
780  int x1= FFMIN(block_w*2, w-sx);
781  int y1= FFMIN(block_h*2, h-sy);
782  int i,x,y;
783 
784  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below, checking only block_w
785 
786  ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
787 
788  for(y=y0; y<y1; y++){
789  const uint8_t *obmc1= obmc_edged[y];
790  const IDWTELEM *pred1 = pred + y*obmc_stride;
791  uint8_t *cur1 = cur + y*ref_stride;
792  uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
793  for(x=x0; x<x1; x++){
794 #if FRAC_BITS >= LOG2_OBMC_MAX
795  int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
796 #else
797  int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
798 #endif
799  v = (v + pred1[x]) >> FRAC_BITS;
800  if(v&(~255)) v= ~(v>>31);
801  dst1[x] = v;
802  }
803  }
804 
805  /* copy the regions where obmc[] = (uint8_t)256 */
806  if(LOG2_OBMC_MAX == 8
807  && (mb_x == 0 || mb_x == b_stride-1)
808  && (mb_y == 0 || mb_y == b_height-1)){
809  if(mb_x == 0)
810  x1 = block_w;
811  else
812  x0 = block_w;
813  if(mb_y == 0)
814  y1 = block_h;
815  else
816  y0 = block_h;
817  for(y=y0; y<y1; y++)
818  memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
819  }
820 
821  if(block_w==16){
822  /* FIXME rearrange dsputil to fit 32x32 cmp functions */
823  /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
824  /* FIXME cmps overlap but do not cover the wavelet's whole support.
825  * So improving the score of one block is not strictly guaranteed
826  * to improve the score of the whole frame, thus iterative motion
827  * estimation does not always converge. */
828  if(s->avctx->me_cmp == FF_CMP_W97)
829  distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
830  else if(s->avctx->me_cmp == FF_CMP_W53)
831  distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
832  else{
833  distortion = 0;
834  for(i=0; i<4; i++){
835  int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
836  distortion += enc->mecc.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
837  }
838  }
839  }else{
840  av_assert2(block_w==8);
841  distortion = enc->mecc.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
842  }
843 
844  if(plane_index==0){
845  for(i=0; i<4; i++){
846 /* ..RRr
847  * .RXx.
848  * rxx..
849  */
850  rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
851  }
852  if(mb_x == b_stride-2)
853  rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
854  }
855  return distortion + rate*penalty_factor;
856 }
857 
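/*
 * Same idea as get_block_rd, but for a merged 2x2 group of blocks: rebuild the
 * 3x3 neighbourhood with add_yblock and sum per-block distortion plus the
 * estimated bits of the affected blocks.
 */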
858 static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
859 {
860  SnowContext *const s = &enc->com;
861  int i, y2;
862  Plane *p= &s->plane[plane_index];
863  const int block_size = MB_SIZE >> s->block_max_depth;
864  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
865  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
866  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
867  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
868  const int ref_stride= s->current_picture->linesize[plane_index];
869  uint8_t *dst= s->current_picture->data[plane_index];
870  const uint8_t *src = s->input_picture->data[plane_index];
871  //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst)
872  // const has only been removed from zero_dst to suppress a warning
873  static IDWTELEM zero_dst[4096]; //FIXME
874  const int b_stride = s->b_width << s->block_max_depth;
875  const int w= p->width;
876  const int h= p->height;
877  int distortion= 0;
878  int rate= 0;
879  const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
880 
881  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below
882 
883  for(i=0; i<9; i++){
884  int mb_x2= mb_x + (i%3) - 1;
885  int mb_y2= mb_y + (i/3) - 1;
886  int x= block_w*mb_x2 + block_w/2;
887  int y= block_h*mb_y2 + block_h/2;
888 
889  add_yblock(s, 0, NULL, zero_dst, dst, obmc,
890  x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
891 
892  //FIXME find a cleaner/simpler way to skip the outside stuff
893  for(y2= y; y2<0; y2++)
894  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
895  for(y2= h; y2<y+block_h; y2++)
896  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
897  if(x<0){
898  for(y2= y; y2<y+block_h; y2++)
899  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
900  }
901  if(x+block_w > w){
902  for(y2= y; y2<y+block_h; y2++)
903  memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
904  }
905 
906  av_assert1(block_w== 8 || block_w==16);
907  distortion += enc->mecc.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
908  }
909 
910  if(plane_index==0){
911  BlockNode *b= &s->block[mb_x+mb_y*b_stride];
912  int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);
913 
914 /* ..RRRr
915  * .RXXx.
916  * .RXXx.
917  * rxxx.
918  */
919  if(merged)
920  rate = get_block_bits(s, mb_x, mb_y, 2);
921  for(i=merged?4:0; i<9; i++){
922  static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
923  rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
924  }
925  }
926  return distortion + rate*penalty_factor;
927 }
928 
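/*
 * Code one subband with zero runs and context modelling. The first pass
 * collects run lengths of coefficients whose causal neighbourhood (left,
 * top-left, top, top-right, parent) is entirely zero; the second pass codes
 * significance with a context derived from those neighbours, the magnitude
 * with put_symbol2 and the sign with a context from the left/top signs,
 * falling back to the stored runs where no non-zero context is available.
 */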
929 static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
930  const int w= b->width;
931  const int h= b->height;
932  int x, y;
933 
934  if(1){
935  int run=0;
936  int *runs = s->run_buffer;
937  int run_index=0;
938  int max_index;
939 
940  for(y=0; y<h; y++){
941  for(x=0; x<w; x++){
942  int v, p=0;
943  int /*ll=0, */l=0, lt=0, t=0, rt=0;
944  v= src[x + y*stride];
945 
946  if(y){
947  t= src[x + (y-1)*stride];
948  if(x){
949  lt= src[x - 1 + (y-1)*stride];
950  }
951  if(x + 1 < w){
952  rt= src[x + 1 + (y-1)*stride];
953  }
954  }
955  if(x){
956  l= src[x - 1 + y*stride];
957  /*if(x > 1){
958  if(orientation==1) ll= src[y + (x-2)*stride];
959  else ll= src[x - 2 + y*stride];
960  }*/
961  }
962  if(parent){
963  int px= x>>1;
964  int py= y>>1;
965  if(px<b->parent->width && py<b->parent->height)
966  p= parent[px + py*2*stride];
967  }
968  if(!(/*ll|*/l|lt|t|rt|p)){
969  if(v){
970  runs[run_index++]= run;
971  run=0;
972  }else{
973  run++;
974  }
975  }
976  }
977  }
978  max_index= run_index;
979  runs[run_index++]= run;
980  run_index=0;
981  run= runs[run_index++];
982 
983  put_symbol2(&s->c, b->state[30], max_index, 0);
984  if(run_index <= max_index)
985  put_symbol2(&s->c, b->state[1], run, 3);
986 
987  for(y=0; y<h; y++){
988  if(s->c.bytestream_end - s->c.bytestream < w*40){
989  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
990  return AVERROR(ENOMEM);
991  }
992  for(x=0; x<w; x++){
993  int v, p=0;
994  int /*ll=0, */l=0, lt=0, t=0, rt=0;
995  v= src[x + y*stride];
996 
997  if(y){
998  t= src[x + (y-1)*stride];
999  if(x){
1000  lt= src[x - 1 + (y-1)*stride];
1001  }
1002  if(x + 1 < w){
1003  rt= src[x + 1 + (y-1)*stride];
1004  }
1005  }
1006  if(x){
1007  l= src[x - 1 + y*stride];
1008  /*if(x > 1){
1009  if(orientation==1) ll= src[y + (x-2)*stride];
1010  else ll= src[x - 2 + y*stride];
1011  }*/
1012  }
1013  if(parent){
1014  int px= x>>1;
1015  int py= y>>1;
1016  if(px<b->parent->width && py<b->parent->height)
1017  p= parent[px + py*2*stride];
1018  }
1019  if(/*ll|*/l|lt|t|rt|p){
1020  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1021 
1022  put_rac(&s->c, &b->state[0][context], !!v);
1023  }else{
1024  if(!run){
1025  run= runs[run_index++];
1026 
1027  if(run_index <= max_index)
1028  put_symbol2(&s->c, b->state[1], run, 3);
1029  av_assert2(v);
1030  }else{
1031  run--;
1032  av_assert2(!v);
1033  }
1034  }
1035  if(v){
1036  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1037  int l2= 2*FFABS(l) + (l<0);
1038  int t2= 2*FFABS(t) + (t<0);
1039 
1040  put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
1041  put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
1042  }
1043  }
1044  }
1045  }
1046  return 0;
1047 }
1048 
1049 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1050 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1051 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1052  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1053 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1054 }
1055 
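/*
 * The check_* helpers below tentatively write a candidate (intra colour or
 * MV/reference) into the BlockNode, re-measure the RD cost and keep the change
 * only if it improves *best_rd, restoring the backup otherwise. The inter
 * variants additionally use the small me_cache hash, keyed on the candidate MV
 * and a per-block generation counter, to avoid re-scoring identical candidates.
 */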
1056 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1057  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1058 {
1059  SnowContext *const s = &enc->com;
1060  const int b_stride= s->b_width << s->block_max_depth;
1061  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1062  BlockNode backup= *block;
1063  int rd;
1064 
1065  av_assert2(mb_x>=0 && mb_y>=0);
1066  av_assert2(mb_x<b_stride);
1067 
1068  block->color[0] = p[0];
1069  block->color[1] = p[1];
1070  block->color[2] = p[2];
1071  block->type |= BLOCK_INTRA;
1072 
1073  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1074 
1075 //FIXME chroma
1076  if(rd < *best_rd){
1077  *best_rd= rd;
1078  return 1;
1079  }else{
1080  *block= backup;
1081  return 0;
1082  }
1083 }
1084 
1085 /* special case for int[2] args we discard afterwards,
1086  * fixes compilation problem with gcc 2.95 */
1087 static av_always_inline int check_block_inter(SnowEncContext *enc,
1088  int mb_x, int mb_y, int p0, int p1,
1089  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1090 {
1091  SnowContext *const s = &enc->com;
1092  const int b_stride = s->b_width << s->block_max_depth;
1093  BlockNode *block = &s->block[mb_x + mb_y * b_stride];
1094  BlockNode backup = *block;
1095  unsigned value;
1096  int rd, index;
1097 
1098  av_assert2(mb_x >= 0 && mb_y >= 0);
1099  av_assert2(mb_x < b_stride);
1100 
1101  index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
1102  value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
1103  if (enc->me_cache[index] == value)
1104  return 0;
1105  enc->me_cache[index] = value;
1106 
1107  block->mx = p0;
1108  block->my = p1;
1109  block->type &= ~BLOCK_INTRA;
1110 
1111  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);
1112 
1113 //FIXME chroma
1114  if (rd < *best_rd) {
1115  *best_rd = rd;
1116  return 1;
1117  } else {
1118  *block = backup;
1119  return 0;
1120  }
1121 }
1122 
1123 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1124  int p0, int p1, int ref, int *best_rd)
1125 {
1126  SnowContext *const s = &enc->com;
1127  const int b_stride= s->b_width << s->block_max_depth;
1128  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1129  BlockNode backup[4];
1130  unsigned value;
1131  int rd, index;
1132 
1133  /* We don't initialize backup[] during variable declaration, because
1134  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1135  * 'int16_t'". */
1136  backup[0] = block[0];
1137  backup[1] = block[1];
1138  backup[2] = block[b_stride];
1139  backup[3] = block[b_stride + 1];
1140 
1141  av_assert2(mb_x>=0 && mb_y>=0);
1142  av_assert2(mb_x<b_stride);
1143  av_assert2(((mb_x|mb_y)&1) == 0);
1144 
1145  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1146  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1147  if (enc->me_cache[index] == value)
1148  return 0;
1149  enc->me_cache[index] = value;
1150 
1151  block->mx= p0;
1152  block->my= p1;
1153  block->ref= ref;
1154  block->type &= ~BLOCK_INTRA;
1155  block[1]= block[b_stride]= block[b_stride+1]= *block;
1156 
1157  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1158 
1159 //FIXME chroma
1160  if(rd < *best_rd){
1161  *best_rd= rd;
1162  return 1;
1163  }else{
1164  block[0]= backup[0];
1165  block[1]= backup[1];
1166  block[b_stride]= backup[2];
1167  block[b_stride+1]= backup[3];
1168  return 0;
1169  }
1170 }
1171 
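/*
 * Iterative motion estimation: a throwaway encode_q_branch() pass (on copies
 * of the coder state) fills ref_mvs/ref_scores and the initial block layout;
 * then up to 25 passes re-optimise every block not yet marked BLOCK_OPT,
 * testing the intra DC from get_dc(), neighbour/zero/cached MVs per reference,
 * an expanding diamond full-pel search and an 8-neighbour subpel refinement.
 * A changed block clears BLOCK_OPT on its neighbours so they are revisited;
 * the loop stops once a pass changes nothing. With 4MV enabled, a final pass
 * additionally tries merging each 2x2 group of blocks.
 */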
1172 static void iterative_me(SnowEncContext *enc)
1173 {
1174  SnowContext *const s = &enc->com;
1175  int pass, mb_x, mb_y;
1176  const int b_width = s->b_width << s->block_max_depth;
1177  const int b_height= s->b_height << s->block_max_depth;
1178  const int b_stride= b_width;
1179  int color[3];
1180 
1181  {
1182  RangeCoder r = s->c;
1183  uint8_t state[sizeof(s->block_state)];
1184  memcpy(state, s->block_state, sizeof(s->block_state));
1185  for(mb_y= 0; mb_y<s->b_height; mb_y++)
1186  for(mb_x= 0; mb_x<s->b_width; mb_x++)
1187  encode_q_branch(enc, 0, mb_x, mb_y);
1188  s->c = r;
1189  memcpy(s->block_state, state, sizeof(s->block_state));
1190  }
1191 
1192  for(pass=0; pass<25; pass++){
1193  int change= 0;
1194 
1195  for(mb_y= 0; mb_y<b_height; mb_y++){
1196  for(mb_x= 0; mb_x<b_width; mb_x++){
1197  int dia_change, i, j, ref;
1198  int best_rd= INT_MAX, ref_rd;
1199  BlockNode backup, ref_b;
1200  const int index= mb_x + mb_y * b_stride;
1201  BlockNode *block= &s->block[index];
1202  BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
1203  BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
1204  BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
1205  BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
1206  BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
1207  BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
1208  BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
1209  BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
1210  const int b_w= (MB_SIZE >> s->block_max_depth);
1211  uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
1212 
1213  if(pass && (block->type & BLOCK_OPT))
1214  continue;
1215  block->type |= BLOCK_OPT;
1216 
1217  backup= *block;
1218 
1219  if (!enc->me_cache_generation)
1220  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1221  enc->me_cache_generation += 1<<22;
1222 
1223  //FIXME precalculate
1224  {
1225  int x, y;
1226  for (y = 0; y < b_w * 2; y++)
1227  memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1228  if(mb_x==0)
1229  for(y=0; y<b_w*2; y++)
1230  memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1231  if(mb_x==b_stride-1)
1232  for(y=0; y<b_w*2; y++)
1233  memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1234  if(mb_y==0){
1235  for(x=0; x<b_w*2; x++)
1236  obmc_edged[0][x] += obmc_edged[b_w-1][x];
1237  for(y=1; y<b_w; y++)
1238  memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1239  }
1240  if(mb_y==b_height-1){
1241  for(x=0; x<b_w*2; x++)
1242  obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1243  for(y=b_w; y<b_w*2-1; y++)
1244  memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
1245  }
1246  }
1247 
1248  //skip stuff outside the picture
1249  if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1250  const uint8_t *src = s->input_picture->data[0];
1251  uint8_t *dst= s->current_picture->data[0];
1252  const int stride= s->current_picture->linesize[0];
1253  const int block_w= MB_SIZE >> s->block_max_depth;
1254  const int block_h= MB_SIZE >> s->block_max_depth;
1255  const int sx= block_w*mb_x - block_w/2;
1256  const int sy= block_h*mb_y - block_h/2;
1257  const int w= s->plane[0].width;
1258  const int h= s->plane[0].height;
1259  int y;
1260 
1261  for(y=sy; y<0; y++)
1262  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1263  for(y=h; y<sy+block_h*2; y++)
1264  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1265  if(sx<0){
1266  for(y=sy; y<sy+block_h*2; y++)
1267  memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
1268  }
1269  if(sx+block_w*2 > w){
1270  for(y=sy; y<sy+block_h*2; y++)
1271  memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
1272  }
1273  }
1274 
1275  // intra(black) = neighbors' contribution to the current block
1276  for(i=0; i < s->nb_planes; i++)
1277  color[i]= get_dc(enc, mb_x, mb_y, i);
1278 
1279  // get previous score (cannot be cached due to OBMC)
1280  if(pass > 0 && (block->type&BLOCK_INTRA)){
1281  int color0[3]= {block->color[0], block->color[1], block->color[2]};
1282  check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
1283  }else
1284  check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
1285 
1286  ref_b= *block;
1287  ref_rd= best_rd;
1288  for(ref=0; ref < s->ref_frames; ref++){
1289  int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1290  if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
1291  continue;
1292  block->ref= ref;
1293  best_rd= INT_MAX;
1294 
1295  check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
1296  check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
1297  if(tb)
1298  check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1299  if(lb)
1300  check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1301  if(rb)
1302  check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
1303  if(bb)
1304  check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1305 
1306  /* fullpel ME */
1307  //FIXME avoid subpel interpolation / round to nearest integer
1308  do{
1309  int newx = block->mx;
1310  int newy = block->my;
1311  int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
1312  dia_change=0;
1313  for(i=0; i < dia_size; i++){
1314  for(j=0; j<i; j++){
1315  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1316  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1317  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1318  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
1319  }
1320  }
1321  }while(dia_change);
1322  /* subpel ME */
1323  do{
1324  static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1325  dia_change=0;
1326  for(i=0; i<8; i++)
1327  dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
1328  }while(dia_change);
1329  //FIXME or try the standard 2 pass qpel or similar
1330 
1331  mvr[0][0]= block->mx;
1332  mvr[0][1]= block->my;
1333  if(ref_rd > best_rd){
1334  ref_rd= best_rd;
1335  ref_b= *block;
1336  }
1337  }
1338  best_rd= ref_rd;
1339  *block= ref_b;
1340  check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
1341  //FIXME RD style color selection
1342  if(!same_block(block, &backup)){
1343  if(tb ) tb ->type &= ~BLOCK_OPT;
1344  if(lb ) lb ->type &= ~BLOCK_OPT;
1345  if(rb ) rb ->type &= ~BLOCK_OPT;
1346  if(bb ) bb ->type &= ~BLOCK_OPT;
1347  if(tlb) tlb->type &= ~BLOCK_OPT;
1348  if(trb) trb->type &= ~BLOCK_OPT;
1349  if(blb) blb->type &= ~BLOCK_OPT;
1350  if(brb) brb->type &= ~BLOCK_OPT;
1351  change ++;
1352  }
1353  }
1354  }
1355  av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
1356  if(!change)
1357  break;
1358  }
1359 
1360  if(s->block_max_depth == 1){
1361  int change= 0;
1362  for(mb_y= 0; mb_y<b_height; mb_y+=2){
1363  for(mb_x= 0; mb_x<b_width; mb_x+=2){
1364  int i;
1365  int best_rd, init_rd;
1366  const int index= mb_x + mb_y * b_stride;
1367  BlockNode *b[4];
1368 
1369  b[0]= &s->block[index];
1370  b[1]= b[0]+1;
1371  b[2]= b[0]+b_stride;
1372  b[3]= b[2]+1;
1373  if(same_block(b[0], b[1]) &&
1374  same_block(b[0], b[2]) &&
1375  same_block(b[0], b[3]))
1376  continue;
1377 
1378  if (!enc->me_cache_generation)
1379  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1380  enc->me_cache_generation += 1<<22;
1381 
1382  init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);
1383 
1384  //FIXME more multiref search?
1385  check_4block_inter(enc, mb_x, mb_y,
1386  (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1387  (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1388 
1389  for(i=0; i<4; i++)
1390  if(!(b[i]->type&BLOCK_INTRA))
1391  check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
1392 
1393  if(init_rd != best_rd)
1394  change++;
1395  }
1396  }
1397  av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
1398  }
1399 }
1400 
1401 static void encode_blocks(SnowEncContext *enc, int search)
1402 {
1403  SnowContext *const s = &enc->com;
1404  int x, y;
1405  int w= s->b_width;
1406  int h= s->b_height;
1407 
1408  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1409  iterative_me(enc);
1410 
1411  for(y=0; y<h; y++){
1412  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1413  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1414  return;
1415  }
1416  for(x=0; x<w; x++){
1417  if (enc->motion_est == FF_ME_ITER || !search)
1418  encode_q_branch2(s, 0, x, y);
1419  else
1420  encode_q_branch (enc, 0, x, y);
1421  }
1422  }
1423 }
1424 
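/*
 * Dead-zone scalar quantiser for one subband: the step qmul is derived from
 * qlog (QROOT logarithmic steps per doubling), coefficients inside the dead
 * zone become zero, the rest are divided by qmul either truncated or with a
 * 3/8-of-a-step rounding offset depending on the bias argument. In lossless
 * mode (qlog == LOSSLESS_QLOG) the coefficients are copied unchanged.
 */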
1425 static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
1426  const int w= b->width;
1427  const int h= b->height;
1428  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1429  const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
1430  int x,y, thres1, thres2;
1431 
1432  if(s->qlog == LOSSLESS_QLOG){
1433  for(y=0; y<h; y++)
1434  for(x=0; x<w; x++)
1435  dst[x + y*stride]= src[x + y*stride];
1436  return;
1437  }
1438 
1439  bias= bias ? 0 : (3*qmul)>>3;
1440  thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
1441  thres2= 2*thres1;
1442 
1443  if(!bias){
1444  for(y=0; y<h; y++){
1445  for(x=0; x<w; x++){
1446  int i= src[x + y*stride];
1447 
1448  if((unsigned)(i+thres1) > thres2){
1449  if(i>=0){
1450  i<<= QEXPSHIFT;
1451  i/= qmul; //FIXME optimize
1452  dst[x + y*stride]= i;
1453  }else{
1454  i= -i;
1455  i<<= QEXPSHIFT;
1456  i/= qmul; //FIXME optimize
1457  dst[x + y*stride]= -i;
1458  }
1459  }else
1460  dst[x + y*stride]= 0;
1461  }
1462  }
1463  }else{
1464  for(y=0; y<h; y++){
1465  for(x=0; x<w; x++){
1466  int i= src[x + y*stride];
1467 
1468  if((unsigned)(i+thres1) > thres2){
1469  if(i>=0){
1470  i<<= QEXPSHIFT;
1471  i= (i + bias) / qmul; //FIXME optimize
1472  dst[x + y*stride]= i;
1473  }else{
1474  i= -i;
1475  i<<= QEXPSHIFT;
1476  i= (i + bias) / qmul; //FIXME optimize
1477  dst[x + y*stride]= -i;
1478  }
1479  }else
1480  dst[x + y*stride]= 0;
1481  }
1482  }
1483  }
1484 }
1485 
1486 static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride){
1487  const int w= b->width;
1488  const int h= b->height;
1489  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1490  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1491  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
1492  int x,y;
1493 
1494  if(s->qlog == LOSSLESS_QLOG) return;
1495 
1496  for(y=0; y<h; y++){
1497  for(x=0; x<w; x++){
1498  int i= src[x + y*stride];
1499  if(i<0){
1500  src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
1501  }else if(i>0){
1502  src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
1503  }
1504  }
1505  }
1506 }
1507 
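/*
 * DPCM for the low-pass band: decorrelate() replaces each coefficient by its
 * residual against a left/top (median or gradient) prediction before coding,
 * and correlate() is the exact inverse applied afterwards.
 */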
1508 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1509  const int w= b->width;
1510  const int h= b->height;
1511  int x,y;
1512 
1513  for(y=h-1; y>=0; y--){
1514  for(x=w-1; x>=0; x--){
1515  int i= x + y*stride;
1516 
1517  if(x){
1518  if(use_median){
1519  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1520  else src[i] -= src[i - 1];
1521  }else{
1522  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1523  else src[i] -= src[i - 1];
1524  }
1525  }else{
1526  if(y) src[i] -= src[i - stride];
1527  }
1528  }
1529  }
1530 }
1531 
1532 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1533  const int w= b->width;
1534  const int h= b->height;
1535  int x,y;
1536 
1537  for(y=0; y<h; y++){
1538  for(x=0; x<w; x++){
1539  int i= x + y*stride;
1540 
1541  if(x){
1542  if(use_median){
1543  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1544  else src[i] += src[i - 1];
1545  }else{
1546  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1547  else src[i] += src[i - 1];
1548  }
1549  }else{
1550  if(y) src[i] += src[i - stride];
1551  }
1552  }
1553  }
1554 }
1555 
1556 static void encode_qlogs(SnowContext *s){
1557  int plane_index, level, orientation;
1558 
1559  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1560  for(level=0; level<s->spatial_decomposition_count; level++){
1561  for(orientation=level ? 1:0; orientation<4; orientation++){
1562  if(orientation==2) continue;
1563  put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1564  }
1565  }
1566  }
1567 }
1568 
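/*
 * Frame header: keyframes (or always_reset) reset the prediction state and
 * code the full global header (version, decomposition counts, colourspace,
 * chroma shifts, max_ref_frames, per-band qlogs); other frames only signal
 * changes to the MC filter or decomposition count. The trailing per-frame
 * fields are always coded as deltas against the previous frame.
 */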
1569 static void encode_header(SnowContext *s){
1570  int plane_index, i;
1571  uint8_t kstate[32];
1572 
1573  memset(kstate, MID_STATE, sizeof(kstate));
1574 
1575  put_rac(&s->c, kstate, s->keyframe);
1576  if(s->keyframe || s->always_reset){
1577  ff_snow_reset_contexts(s);
1578  s->last_spatial_decomposition_type=
1579  s->last_qlog=
1580  s->last_qbias=
1581  s->last_mv_scale=
1582  s->last_block_max_depth= 0;
1583  for(plane_index=0; plane_index<2; plane_index++){
1584  Plane *p= &s->plane[plane_index];
1585  p->last_htaps=0;
1586  p->last_diag_mc=0;
1587  memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
1588  }
1589  }
1590  if(s->keyframe){
1591  put_symbol(&s->c, s->header_state, s->version, 0);
1592  put_rac(&s->c, s->header_state, s->always_reset);
1593  put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1594  put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1595  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1596  put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
1597  if (s->nb_planes > 2) {
1598  put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
1599  put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
1600  }
1601  put_rac(&s->c, s->header_state, s->spatial_scalability);
1602 // put_rac(&s->c, s->header_state, s->rate_scalability);
1603  put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
1604 
1605  encode_qlogs(s);
1606  }
1607 
1608  if(!s->keyframe){
1609  int update_mc=0;
1610  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1611  Plane *p= &s->plane[plane_index];
1612  update_mc |= p->last_htaps != p->htaps;
1613  update_mc |= p->last_diag_mc != p->diag_mc;
1614  update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1615  }
1616  put_rac(&s->c, s->header_state, update_mc);
1617  if(update_mc){
1618  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1619  Plane *p= &s->plane[plane_index];
1620  put_rac(&s->c, s->header_state, p->diag_mc);
1621  put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
1622  for(i= p->htaps/2; i; i--)
1623  put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
1624  }
1625  }
1626  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1627  put_rac(&s->c, s->header_state, 1);
1628  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1629  encode_qlogs(s);
1630  }else
1631  put_rac(&s->c, s->header_state, 0);
1632  }
1633 
1634  put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1635  put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
1636  put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1637  put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
1638  put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1639 
1640 }
1641 
1642 static void update_last_header_values(SnowContext *s){
1643  int plane_index;
1644 
1645  if(!s->keyframe){
1646  for(plane_index=0; plane_index<2; plane_index++){
1647  Plane *p= &s->plane[plane_index];
1648  p->last_diag_mc= p->diag_mc;
1649  p->last_htaps = p->htaps;
1650  memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1651  }
1652  }
1653 
1654  s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1655  s->last_qlog = s->qlog;
1656  s->last_qbias = s->qbias;
1657  s->last_mv_scale = s->mv_scale;
1658  s->last_block_max_depth = s->block_max_depth;
1659  s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1660 }
1661 
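/* Map an mpegvideo-style quality value (qscale scaled by FF_QP2LAMBDA) onto
 * Snow's logarithmic qlog scale: QROOT qlog steps correspond to one doubling
 * of the quantiser. */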
1662 static int qscale2qlog(int qscale){
1663  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1664  + 61*QROOT/8; ///< 64 > 60
1665 }
1666 
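/*
 * One-pass ratecontrol: estimate frame complexity as the sum of luma subband
 * coefficient magnitudes scaled by a nominal per-band quantiser, square it
 * (the mpegvideo ratecontrol takes a square root again), feed it in as the
 * variance sum for the picture type, and convert the returned quality into
 * lambda and a qlog delta.
 */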
1667 static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
1668 {
1669  SnowContext *const s = &enc->com;
1670  /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
1671  * FIXME we know exact mv bits at this point,
1672  * but ratecontrol isn't set up to include them. */
1673  uint32_t coef_sum= 0;
1674  int level, orientation, delta_qlog;
1675 
1676  for(level=0; level<s->spatial_decomposition_count; level++){
1677  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1678  SubBand *b= &s->plane[0].band[level][orientation];
1679  IDWTELEM *buf= b->ibuf;
1680  const int w= b->width;
1681  const int h= b->height;
1682  const int stride= b->stride;
1683  const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
1684  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1685  const int qdiv= (1<<16)/qmul;
1686  int x, y;
1687  //FIXME this is ugly
1688  for(y=0; y<h; y++)
1689  for(x=0; x<w; x++)
1690  buf[x+y*stride]= b->buf[x+y*stride];
1691  if(orientation==0)
1692  decorrelate(s, b, buf, stride, 1, 0);
1693  for(y=0; y<h; y++)
1694  for(x=0; x<w; x++)
1695  coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1696  }
1697  }
1698 
1699  /* ugly, ratecontrol just takes a sqrt again */
1700  av_assert0(coef_sum < INT_MAX);
1701  coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1702 
1703  if(pict->pict_type == AV_PICTURE_TYPE_I){
1704  enc->m.mb_var_sum = coef_sum;
1705  enc->m.mc_mb_var_sum = 0;
1706  }else{
1707  enc->m.mc_mb_var_sum = coef_sum;
1708  enc->m.mb_var_sum = 0;
1709  }
1710 
1711  pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
1712  if (pict->quality < 0)
1713  return INT_MIN;
1714  enc->lambda= pict->quality * 3/2;
1715  delta_qlog= qscale2qlog(pict->quality) - s->qlog;
1716  s->qlog+= delta_qlog;
1717  return delta_qlog;
1718 }
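The inner loop above avoids a per-coefficient division: since qdiv = (1<<16)/qmul, multiplying by qdiv and shifting right by 16 approximates division by qmul (it can round down slightly). A standalone illustration with arbitrary example values, not taken from the quantizer tables:

    /* sketch: the fixed-point reciprocal used in ratecontrol_1pass() */
    int qmul = 181;                     /* arbitrary example step size */
    int qdiv = (1 << 16) / qmul;        /* 362 */
    int v    = 1000;                    /* arbitrary coefficient magnitude */
    int a    = (v * qdiv) >> 16;        /* 5, without a division per coefficient */
    int e    = v / qmul;                /* 5, the exact result for comparison */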
1719 
1720 static void calculate_visual_weight(SnowContext *s, Plane *p){
1721  int width = p->width;
1722  int height= p->height;
1723  int level, orientation, x, y;
1724 
1725  for(level=0; level<s->spatial_decomposition_count; level++){
1726  int64_t error=0;
1727  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1728  SubBand *b= &p->band[level][orientation];
1729  IDWTELEM *ibuf= b->ibuf;
1730 
1731  memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1732  ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1733  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
1734  for(y=0; y<height; y++){
1735  for(x=0; x<width; x++){
1736  int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1737  error += d*d;
1738  }
1739  }
1740  if (orientation == 2)
1741  error /= 2;
1742  b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
1743  if (orientation != 1)
1744  error = 0;
1745  }
1746  p->band[level][1].qlog = p->band[level][2].qlog;
1747  }
1748 }
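calculate_visual_weight() drops a single impulse into the middle of each subband, runs the inverse transform, and measures how much energy that basis function spreads into the picture; bands whose basis functions carry more energy get a lower qlog, i.e. finer quantization. Restating the code above as a relation (no new behaviour is implied):

    /* error  = sum over all pixels of (16 * idwt(unit impulse in band b))^2
     * b->qlog = round(QROOT * log2(352256.0 / sqrt(error)))
     * so a band whose basis function carries 4x the energy ends up with a qlog
     * lower by QROOT, i.e. half the quantization step. */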
1749 
1750 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1751  const AVFrame *pict, int *got_packet)
1752 {
1753  SnowEncContext *const enc = avctx->priv_data;
1754  SnowContext *const s = &enc->com;
1755  MpegEncContext *const mpv = &enc->m;
1756  RangeCoder * const c= &s->c;
1757  AVCodecInternal *avci = avctx->internal;
1758  AVFrame *pic;
1759  const int width= s->avctx->width;
1760  const int height= s->avctx->height;
1761  int level, orientation, plane_index, i, y, ret;
1762  uint8_t rc_header_bak[sizeof(s->header_state)];
1763  uint8_t rc_block_bak[sizeof(s->block_state)];
1764 
1765  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1766  return ret;
1767 
1768  ff_init_range_encoder(c, pkt->data, pkt->size);
1769  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1770 
1771  for(i=0; i < s->nb_planes; i++){
1772  int hshift= i ? s->chroma_h_shift : 0;
1773  int vshift= i ? s->chroma_v_shift : 0;
1774  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1775  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1776  &pict->data[i][y * pict->linesize[i]],
1777  AV_CEIL_RSHIFT(width, hshift));
1778  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1779  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1780  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1781  EDGE_TOP | EDGE_BOTTOM);
1782 
1783  }
1784  emms_c();
1785  pic = s->input_picture;
1786  pic->pict_type = pict->pict_type;
1787  pic->quality = pict->quality;
1788 
1789  mpv->picture_number = avctx->frame_num;
1790  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1791  mpv->pict_type = pic->pict_type = mpv->rc_context.entry[avctx->frame_num].new_pict_type;
1792  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1793  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1794  pic->quality = ff_rate_estimate_qscale(mpv, 0);
1795  if (pic->quality < 0)
1796  return -1;
1797  }
1798  }else{
1799  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1800  mpv->pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1801  }
1802 
1803  if (enc->pass1_rc && avctx->frame_num == 0)
1804  pic->quality = 2*FF_QP2LAMBDA;
1805  if (pic->quality) {
1806  s->qlog = qscale2qlog(pic->quality);
1807  enc->lambda = pic->quality * 3/2;
1808  }
1809  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1810  s->qlog= LOSSLESS_QLOG;
1811  enc->lambda = 0;
1812  }//else keep previous frame's qlog until after motion estimation
1813 
1814  if (s->current_picture->data[0]) {
1815  int w = s->avctx->width;
1816  int h = s->avctx->height;
1817 
1818  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1819  s->current_picture->linesize[0], w , h ,
1820  EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM);
1821  if (s->current_picture->data[2]) {
1822  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1823  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1824  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1825  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1826  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1827  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1828  }
1829  emms_c();
1830  }
1831 
1832  ff_snow_frames_prepare(s);
1833  ret = get_encode_buffer(s, s->current_picture);
1834  if (ret < 0)
1835  return ret;
1836 
1837  mpv->current_picture_ptr = &mpv->current_picture;
1838  mpv->current_picture.f = s->current_picture;
1839  mpv->current_picture.f->pts = pict->pts;
1840  if(pic->pict_type == AV_PICTURE_TYPE_P){
1841  int block_width = (width +15)>>4;
1842  int block_height= (height+15)>>4;
1843  int stride= s->current_picture->linesize[0];
1844 
1845  av_assert0(s->current_picture->data[0]);
1846  av_assert0(s->last_picture[0]->data[0]);
1847 
1848  mpv->avctx = s->avctx;
1849  mpv->last_picture.f = s->last_picture[0];
1850  mpv-> new_picture = s->input_picture;
1851  mpv->last_picture_ptr = &mpv->last_picture;
1852  mpv->linesize = stride;
1853  mpv->uvlinesize = s->current_picture->linesize[1];
1854  mpv->width = width;
1855  mpv->height = height;
1856  mpv->mb_width = block_width;
1857  mpv->mb_height = block_height;
1858  mpv->mb_stride = mpv->mb_width + 1;
1859  mpv->b8_stride = 2 * mpv->mb_width + 1;
1860  mpv->f_code = 1;
1861  mpv->pict_type = pic->pict_type;
1862  mpv->motion_est = enc->motion_est;
1863  mpv->me.scene_change_score = 0;
1864  mpv->me.dia_size = avctx->dia_size;
1865  mpv->quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1866  mpv->out_format = FMT_H263;
1867  mpv->unrestricted_mv = 1;
1868 
1869  mpv->lambda = enc->lambda;
1870  mpv->qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1871  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1872 
1873  mpv->mecc = enc->mecc; //move
1874  mpv->qdsp = enc->qdsp; //move
1875  mpv->hdsp = s->hdsp;
1876  ff_init_me(&enc->m);
1877  s->hdsp = mpv->hdsp;
1878  enc->mecc = mpv->mecc;
1879  }
1880 
1881  if (enc->pass1_rc) {
1882  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1883  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1884  }
1885 
1886 redo_frame:
1887 
1888  s->spatial_decomposition_count= 5;
1889 
1890  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1891  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1892  s->spatial_decomposition_count--;
1893 
1894  if (s->spatial_decomposition_count <= 0) {
1895  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1896  return AVERROR(EINVAL);
1897  }
1898 
1899  mpv->pict_type = pic->pict_type;
1900  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1901 
1902  ff_snow_common_init_after_header(avctx);
1903 
1904  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1905  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1906  calculate_visual_weight(s, &s->plane[plane_index]);
1907  }
1908  }
1909 
1910  encode_header(s);
1911  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1912  encode_blocks(enc, 1);
1913  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1914 
1915  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1916  Plane *p= &s->plane[plane_index];
1917  int w= p->width;
1918  int h= p->height;
1919  int x, y;
1920 // int bits= put_bits_count(&s->c.pb);
1921 
1922  if (!enc->memc_only) {
1923  //FIXME optimize
1924  if(pict->data[plane_index]) //FIXME gray hack
1925  for(y=0; y<h; y++){
1926  for(x=0; x<w; x++){
1927  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1928  }
1929  }
1930  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1931 
1932  if( plane_index==0
1933  && pic->pict_type == AV_PICTURE_TYPE_P
1934  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1935  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1936  ff_init_range_encoder(c, pkt->data, pkt->size);
1937  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1938  pic->pict_type= AV_PICTURE_TYPE_I;
1939  s->keyframe=1;
1940  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1941  goto redo_frame;
1942  }
1943 
1944  if(s->qlog == LOSSLESS_QLOG){
1945  for(y=0; y<h; y++){
1946  for(x=0; x<w; x++){
1947  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1948  }
1949  }
1950  }else{
1951  for(y=0; y<h; y++){
1952  for(x=0; x<w; x++){
1953  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1954  }
1955  }
1956  }
1957 
1958  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1959 
1960  if (enc->pass1_rc && plane_index==0) {
1961  int delta_qlog = ratecontrol_1pass(enc, pic);
1962  if (delta_qlog <= INT_MIN)
1963  return -1;
1964  if(delta_qlog){
1965  //reordering qlog in the bitstream would eliminate this reset
1966  ff_init_range_encoder(c, pkt->data, pkt->size);
1967  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1968  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1969  encode_header(s);
1970  encode_blocks(enc, 0);
1971  }
1972  }
1973 
1974  for(level=0; level<s->spatial_decomposition_count; level++){
1975  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1976  SubBand *b= &p->band[level][orientation];
1977 
1978  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1979  if(orientation==0)
1980  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1981  if (!enc->no_bitstream)
1982  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1983  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1984  if(orientation==0)
1985  correlate(s, b, b->ibuf, b->stride, 1, 0);
1986  }
1987  }
1988 
1989  for(level=0; level<s->spatial_decomposition_count; level++){
1990  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1991  SubBand *b= &p->band[level][orientation];
1992 
1993  dequantize(s, b, b->ibuf, b->stride);
1994  }
1995  }
1996 
1997  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1998  if(s->qlog == LOSSLESS_QLOG){
1999  for(y=0; y<h; y++){
2000  for(x=0; x<w; x++){
2001  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2002  }
2003  }
2004  }
2005  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2006  }else{
2007  //ME/MC only
2008  if(pic->pict_type == AV_PICTURE_TYPE_I){
2009  for(y=0; y<h; y++){
2010  for(x=0; x<w; x++){
2011  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2012  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2013  }
2014  }
2015  }else{
2016  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2017  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2018  }
2019  }
2020  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2021  int64_t error= 0;
2022 
2023  if(pict->data[plane_index]) //FIXME gray hack
2024  for(y=0; y<h; y++){
2025  for(x=0; x<w; x++){
2026  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2027  error += d*d;
2028  }
2029  }
2030  s->avctx->error[plane_index] += error;
2031  enc->encoding_error[plane_index] = error;
2032  }
2033 
2034  }
2035  emms_c();
2036 
2037  update_last_header_values(s);
2038 
2039  ff_snow_release_buffer(avctx);
2040 
2041  s->current_picture->pict_type = pic->pict_type;
2042  s->current_picture->quality = pic->quality;
2043  mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2044  mpv->p_tex_bits = mpv->frame_bits - mpv->misc_bits - mpv->mv_bits;
2045  mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2046  mpv->current_picture.display_picture_number =
2047  mpv->current_picture.coded_picture_number = avctx->frame_num;
2048  mpv->current_picture.f->quality = pic->quality;
2049  if (enc->pass1_rc)
2050  if (ff_rate_estimate_qscale(mpv, 0) < 0)
2051  return -1;
2052  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2053  ff_write_pass1_stats(mpv);
2054  mpv->last_pict_type = mpv->pict_type;
2055 
2056  emms_c();
2057 
2058  ff_side_data_set_encoder_stats(pkt, s->current_picture->quality,
2059  enc->encoding_error,
2060  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2061  s->current_picture->pict_type);
2062  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2063  av_frame_replace(avci->recon_frame, s->current_picture);
2064  }
2065 
2066  pkt->size = ff_rac_terminate(c, 0);
2067  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2068  pkt->flags |= AV_PKT_FLAG_KEY;
2069  *got_packet = 1;
2070 
2071  return 0;
2072 }
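For orientation, a condensed view of what encode_frame() above does per frame; this is a summary of the code, not a separate implementation:

    /* 1. copy the input picture and pad its borders (draw_edges)
     * 2. pick the frame type (2-pass rate control or GOP position) and derive
     *    qlog/lambda from the requested quality
     * 3. for P frames, run motion estimation through the embedded MpegEncContext
     * 4. choose spatial_decomposition_count so the smallest chroma subband is non-empty
     * 5. encode_header() and encode_blocks() write the header and the block/motion tree
     * 6. per plane: subtract the prediction (predict_plane, add=0), forward DWT,
     *    quantize(), decorrelate() the LL band, encode_subband(), then dequantize(),
     *    inverse DWT and add the prediction back (predict_plane, add=1) so that
     *    current_picture becomes the reference for the next frame
     * 7. a scene change detected during ME restarts the frame as intra; 1-pass rate
     *    control may re-encode the header and blocks with an adjusted qlog
     * 8. gather PSNR/pass-1 statistics and terminate the range coder (ff_rac_terminate) */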
2073 
2074 static av_cold int encode_end(AVCodecContext *avctx)
2075 {
2076  SnowEncContext *const enc = avctx->priv_data;
2077  SnowContext *const s = &enc->com;
2078 
2079  ff_snow_common_end(s);
2080  ff_rate_control_uninit(&enc->m);
2081  av_frame_free(&s->input_picture);
2082 
2083  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2084  av_freep(&s->ref_mvs[i]);
2085  av_freep(&s->ref_scores[i]);
2086  }
2087 
2088  enc->m.me.temp = NULL;
2089  av_freep(&enc->m.me.scratchpad);
2090  av_freep(&enc->m.me.map);
2091  av_freep(&enc->m.sc.obmc_scratchpad);
2092 
2093  av_freep(&avctx->stats_out);
2094 
2095  return 0;
2096 }
2097 
2098 #define OFFSET(x) offsetof(SnowEncContext, x)
2099 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2100 static const AVOption options[] = {
2101  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2102  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2103  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2104  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
2105  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
2106  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2107  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2108  { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2109  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2110  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2111  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2112  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2113  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2114  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2115  "defined in the section 'Expression Evaluation', the following functions are available: "
2116  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2117  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2118  OFFSET(m.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
2119  { NULL },
2120 };
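These are private encoder options, so callers reach them through the AVOptions API (the ffmpeg CLI exposes the same names). A minimal sketch, assuming only the public libavcodec/libavutil API; the helper name open_snow_encoder_sketch() is hypothetical and error handling is omitted:

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    static AVCodecContext *open_snow_encoder_sketch(void)
    {
        const AVCodec *codec = avcodec_find_encoder_by_name("snow");
        AVCodecContext *ctx  = avcodec_alloc_context3(codec);
        /* option names and constants come from the options[] table above */
        av_opt_set    (ctx->priv_data, "pred", "dwt53", 0);      /* 5/3 wavelet */
        av_opt_set    (ctx->priv_data, "motion_est", "iter", 0); /* iterative ME */
        av_opt_set_int(ctx->priv_data, "memc_only", 1, 0);
        return ctx; /* width/height/pix_fmt/time_base must still be set before avcodec_open2() */
    }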
2121 
2122 static const AVClass snowenc_class = {
2123  .class_name = "snow encoder",
2124  .item_name = av_default_item_name,
2125  .option = options,
2126  .version = LIBAVUTIL_VERSION_INT,
2127 };
2128 
2129 const FFCodec ff_snow_encoder = {
2130  .p.name = "snow",
2131  CODEC_LONG_NAME("Snow"),
2132  .p.type = AVMEDIA_TYPE_VIDEO,
2133  .p.id = AV_CODEC_ID_SNOW,
2134  .p.capabilities = AV_CODEC_CAP_DR1 |
2135  AV_CODEC_CAP_ENCODER_RECON_FRAME |
2136  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
2137  .priv_data_size = sizeof(SnowEncContext),
2138  .init = encode_init,
2139  FF_CODEC_ENCODE_CB(encode_frame),
2140  .close = encode_end,
2141  .p.pix_fmts = (const enum AVPixelFormat[]){
2142  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
2143  AV_PIX_FMT_GRAY8,
2144  AV_PIX_FMT_NONE
2145  },
2146  .p.priv_class = &snowenc_class,
2147  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2148 };