snowenc.c
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "packet_internal.h"
34 #include "qpeldsp.h"
35 #include "snow_dwt.h"
36 #include "snow.h"
37 
38 #include "rangecoder.h"
39 #include "mathops.h"
40 
41 #include "mpegvideo.h"
42 #include "h263enc.h"
43 
44 #define FF_ME_ITER 3
45 
46 typedef struct SnowEncContext {
47  SnowContext com;
48  QpelDSPContext qdsp;
49  MpegvideoEncDSPContext mpvencdsp;
50 
51  int lambda;
52  int lambda2;
53  int pass1_rc;
54 
55  int pred;
56  int memc_only;
57  int intra_penalty;
58  int motion_est;
59  int iterative_dia_size;
62 
63  MECmpContext mecc;
64  MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
65 
66 #define ME_CACHE_SIZE 1024
67  unsigned me_cache[ME_CACHE_SIZE];
68  unsigned me_cache_generation;
69 
71 } SnowEncContext;
72 
73 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
74  uint8_t *const ref[3], uint8_t *const ref2[3],
75  int x, int y, int ref_index)
76 {
77  SnowContext *s = c->avctx->priv_data;
78  const int offset[3] = {
79  y*c-> stride + x,
80  ((y*c->uvstride + x) >> s->chroma_h_shift),
81  ((y*c->uvstride + x) >> s->chroma_h_shift),
82  };
83  for (int i = 0; i < 3; i++) {
84  c->src[0][i] = src [i];
85  c->ref[0][i] = ref [i] + offset[i];
86  }
87  av_assert2(!ref_index);
88 }
89 
90 static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
91 {
92  if (v) {
93  const int a = FFABS(v);
94  const int e = av_log2(a);
95  const int el = FFMIN(e, 10);
96  int i;
97 
98  put_rac(c, state + 0, 0);
99 
100  for (i = 0; i < el; i++)
101  put_rac(c, state + 1 + i, 1); //1..10
102  for(; i < e; i++)
103  put_rac(c, state + 1 + 9, 1); //1..10
104  put_rac(c, state + 1 + FFMIN(i, 9), 0);
105 
106  for (i = e - 1; i >= el; i--)
107  put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
108  for(; i >= 0; i--)
109  put_rac(c, state + 22 + i, (a >> i) & 1); //22..31
110 
111  if (is_signed)
112  put_rac(c, state + 11 + el, v < 0); //11..21
113  } else {
114  put_rac(c, state + 0, 1);
115  }
116 }
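/* Illustrative walk-through (not from the original source): put_symbol() above
 * is an adaptive Elias-gamma-like coder with one binary context per position.
 * Encoding v = 5 with is_signed = 1 (a = 5, e = av_log2(5) = 2, el = 2) emits
 *   state[0]        <- 0      value is non-zero
 *   state[1..2]     <- 1, 1   unary exponent (two 1s for e = 2)
 *   state[3]        <- 0      exponent terminator
 *   state[23],[22]  <- 0, 1   bits of |v| below the implicit top bit
 *   state[13]       <- 0      sign bit at 11 + el, v >= 0
 */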
117 
118 static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
119 {
120  int r = log2 >= 0 ? 1<<log2 : 1;
121 
122  av_assert2(v >= 0);
123  av_assert2(log2 >= -4);
124 
125  while (v >= r) {
126  put_rac(c, state + 4 + log2, 1);
127  v -= r;
128  log2++;
129  if (log2 > 0) r += r;
130  }
131  put_rac(c, state + 4 + log2, 0);
132 
133  for (int i = log2 - 1; i >= 0; i--)
134  put_rac(c, state + 31 - i, (v >> i) & 1);
135 }
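/* Illustrative walk-through (not from the original source): put_symbol2() above
 * is an adaptive Golomb/Rice-style coder in which log2 acts as a growing divisor
 * exponent. For v = 10, log2 = 3: r = 8, so one '1' is written (v -= 8, log2
 * becomes 4), then the '0' terminator, then the remainder 2 as log2 = 4 binary
 * digits 0,0,1,0 using the states at 31 - i.
 */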
136 
137 static int get_encode_buffer(SnowContext *s, AVFrame *frame)
138 {
139  int ret;
140 
141  frame->width = s->avctx->width + 2 * EDGE_WIDTH;
142  frame->height = s->avctx->height + 2 * EDGE_WIDTH;
143 
144  ret = ff_encode_alloc_frame(s->avctx, frame);
145  if (ret < 0)
146  return ret;
147  for (int i = 0; frame->data[i]; i++) {
148  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
149  frame->linesize[i] +
150  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
151  frame->data[i] += offset;
152  }
153  frame->width = s->avctx->width;
154  frame->height = s->avctx->height;
155 
156  return 0;
157 }
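/* get_encode_buffer() above allocates the internal input frame with an
 * EDGE_WIDTH border on every side and then advances the data pointers so that
 * (0,0) addresses the visible picture; the chroma border offsets are scaled by
 * the chroma subsampling shifts. */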
158 
159 static av_cold int encode_init(AVCodecContext *avctx)
160 {
161  SnowEncContext *const enc = avctx->priv_data;
162  SnowContext *const s = &enc->com;
163  MpegEncContext *const mpv = &enc->m;
164  int plane_index, ret;
165  int i;
166 
167  if (enc->pred == DWT_97
168  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
169  && avctx->global_quality == 0){
170  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
171  return AVERROR(EINVAL);
172  }
173 
174  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type or transform_type
175 
176  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
177  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
178 
179  for(plane_index=0; plane_index<3; plane_index++){
180  s->plane[plane_index].diag_mc= 1;
181  s->plane[plane_index].htaps= 6;
182  s->plane[plane_index].hcoeff[0]= 40;
183  s->plane[plane_index].hcoeff[1]= -10;
184  s->plane[plane_index].hcoeff[2]= 2;
185  s->plane[plane_index].fast_mc= 1;
186  }
187 
188  // Must be before ff_snow_common_init()
189  ff_hpeldsp_init(&s->hdsp, avctx->flags);
190  if ((ret = ff_snow_common_init(avctx)) < 0) {
191  return ret;
192  }
193 
194 #define mcf(dx,dy)\
195  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
196  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
197  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
198  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
199  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
200  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
201 
202  mcf( 0, 0)
203  mcf( 4, 0)
204  mcf( 8, 0)
205  mcf(12, 0)
206  mcf( 0, 4)
207  mcf( 4, 4)
208  mcf( 8, 4)
209  mcf(12, 4)
210  mcf( 0, 8)
211  mcf( 4, 8)
212  mcf( 8, 8)
213  mcf(12, 8)
214  mcf( 0,12)
215  mcf( 4,12)
216  mcf( 8,12)
217  mcf(12,12)
218 
219  ff_me_cmp_init(&enc->mecc, avctx);
220  ret = ff_me_init(&enc->m.me, avctx, &enc->mecc, 0);
221  if (ret < 0)
222  return ret;
223  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
224 
226 
227  s->version=0;
228 
229  mpv->avctx = avctx;
230  mpv->bit_rate= avctx->bit_rate;
231  mpv->lmin = avctx->mb_lmin;
232  mpv->lmax = avctx->mb_lmax;
233  mpv->mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
234 
235  mpv->me.temp =
236  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
237  mpv->sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
238  mpv->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*mpv->me.map));
239  if (!mpv->me.scratchpad || !mpv->me.map || !mpv->sc.obmc_scratchpad)
240  return AVERROR(ENOMEM);
241  mpv->me.score_map = mpv->me.map + ME_MAP_SIZE;
242 
243  ff_h263_encode_init(mpv); //mv_penalty
244 
245  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
246 
247  if(avctx->flags&AV_CODEC_FLAG_PASS1){
248  if(!avctx->stats_out)
249  avctx->stats_out = av_mallocz(256);
250 
251  if (!avctx->stats_out)
252  return AVERROR(ENOMEM);
253  }
254  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
255  ret = ff_rate_control_init(mpv);
256  if(ret < 0)
257  return ret;
258  }
260 
261  switch(avctx->pix_fmt){
262  case AV_PIX_FMT_YUV444P:
263 // case AV_PIX_FMT_YUV422P:
264  case AV_PIX_FMT_YUV420P:
265 // case AV_PIX_FMT_YUV411P:
266  case AV_PIX_FMT_YUV410P:
267  s->nb_planes = 3;
268  s->colorspace_type= 0;
269  break;
270  case AV_PIX_FMT_GRAY8:
271  s->nb_planes = 1;
272  s->colorspace_type = 1;
273  break;
274 /* case AV_PIX_FMT_RGB32:
275  s->colorspace= 1;
276  break;*/
277  }
278 
279  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
280  &s->chroma_v_shift);
281  if (ret)
282  return ret;
283 
284  s->input_picture = av_frame_alloc();
285  if (!s->input_picture)
286  return AVERROR(ENOMEM);
287 
288  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
289  return ret;
290 
291  if (enc->motion_est == FF_ME_ITER) {
292  int size= s->b_width * s->b_height << 2*s->block_max_depth;
293  for(i=0; i<s->max_ref_frames; i++){
294  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
295  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
296  if (!s->ref_mvs[i] || !s->ref_scores[i])
297  return AVERROR(ENOMEM);
298  }
299  }
300 
301  return 0;
302 }
303 
304 //near copy & paste from dsputil, FIXME
305 static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
306 {
307  int s, i, j;
308 
309  s = 0;
310  for (i = 0; i < h; i++) {
311  for (j = 0; j < w; j++) {
312  s += pix[0];
313  pix ++;
314  }
315  pix += line_size - w;
316  }
317  return s;
318 }
319 
320 //near copy & paste from dsputil, FIXME
321 static int pix_norm1(const uint8_t * pix, int line_size, int w)
322 {
323  int s, i, j;
324  const uint32_t *sq = ff_square_tab + 256;
325 
326  s = 0;
327  for (i = 0; i < w; i++) {
328  for (j = 0; j < w; j ++) {
329  s += sq[pix[0]];
330  pix ++;
331  }
332  pix += line_size - w;
333  }
334  return s;
335 }
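/* Note: pix_norm1() sums squared samples over a square w x w region (both
 * loops are bounded by w); its callers pass the block width for both
 * dimensions. */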
336 
337 static inline int get_penalty_factor(int lambda, int lambda2, int type){
338  switch(type&0xFF){
339  default:
340  case FF_CMP_SAD:
341  return lambda>>FF_LAMBDA_SHIFT;
342  case FF_CMP_DCT:
343  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
344  case FF_CMP_W53:
345  return (4*lambda)>>(FF_LAMBDA_SHIFT);
346  case FF_CMP_W97:
347  return (2*lambda)>>(FF_LAMBDA_SHIFT);
348  case FF_CMP_SATD:
349  case FF_CMP_DCT264:
350  return (2*lambda)>>FF_LAMBDA_SHIFT;
351  case FF_CMP_RD:
352  case FF_CMP_PSNR:
353  case FF_CMP_SSE:
354  case FF_CMP_NSSE:
355  return lambda2>>FF_LAMBDA_SHIFT;
356  case FF_CMP_BIT:
357  return 1;
358  }
359 }
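/* get_penalty_factor() converts the global lambda into the units of the
 * selected comparison function (roughly lambda for SAD-like metrics, lambda2
 * for squared-error metrics), so that rate*penalty_factor is comparable to the
 * distortion returned by that metric. */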
360 
361 //FIXME copy&paste
362 #define P_LEFT P[1]
363 #define P_TOP P[2]
364 #define P_TOPRIGHT P[3]
365 #define P_MEDIAN P[4]
366 #define P_MV1 P[9]
367 #define FLAG_QPEL 1 //must be 1
368 
369 static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
370 {
371  SnowContext *const s = &enc->com;
372  MotionEstContext *const c = &enc->m.me;
373  uint8_t p_buffer[1024];
374  uint8_t i_buffer[1024];
375  uint8_t p_state[sizeof(s->block_state)];
376  uint8_t i_state[sizeof(s->block_state)];
377  RangeCoder pc, ic;
378  uint8_t *pbbak= s->c.bytestream;
379  uint8_t *pbbak_start= s->c.bytestream_start;
380  int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
381  const int w= s->b_width << s->block_max_depth;
382  const int h= s->b_height << s->block_max_depth;
383  const int rem_depth= s->block_max_depth - level;
384  const int index= (x + y*w) << rem_depth;
385  const int block_w= 1<<(LOG2_MB_SIZE - level);
386  int trx= (x+1)<<rem_depth;
387  int try= (y+1)<<rem_depth;
388  const BlockNode *left = x ? &s->block[index-1] : &null_block;
389  const BlockNode *top = y ? &s->block[index-w] : &null_block;
390  const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
391  const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
392  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
393  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
394  int pl = left->color[0];
395  int pcb= left->color[1];
396  int pcr= left->color[2];
397  int pmx, pmy;
398  int mx=0, my=0;
399  int l,cr,cb;
400  const int stride= s->current_picture->linesize[0];
401  const int uvstride= s->current_picture->linesize[1];
402  const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y* stride)*block_w,
403  s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
404  s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
405  int P[10][2];
406  int16_t last_mv[3][2];
407  int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
408  const int shift= 1+qpel;
409  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
410  int mx_context= av_log2(2*FFABS(left->mx - top->mx));
411  int my_context= av_log2(2*FFABS(left->my - top->my));
412  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
413  int ref, best_ref, ref_score, ref_mx, ref_my;
414  int range = MAX_MV >> (1 + qpel);
415 
416  av_assert0(sizeof(s->block_state) >= 256);
417  if(s->keyframe){
418  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
419  return 0;
420  }
421 
422 // clip predictors / edge ?
423 
424  P_LEFT[0]= left->mx;
425  P_LEFT[1]= left->my;
426  P_TOP [0]= top->mx;
427  P_TOP [1]= top->my;
428  P_TOPRIGHT[0]= tr->mx;
429  P_TOPRIGHT[1]= tr->my;
430 
431  last_mv[0][0]= s->block[index].mx;
432  last_mv[0][1]= s->block[index].my;
433  last_mv[1][0]= right->mx;
434  last_mv[1][1]= right->my;
435  last_mv[2][0]= bottom->mx;
436  last_mv[2][1]= bottom->my;
437 
438  enc->m.mb_stride = 2;
439  enc->m.mb_x =
440  enc->m.mb_y = 0;
441  c->skip= 0;
442 
443  av_assert1(c-> stride == stride);
444  av_assert1(c->uvstride == uvstride);
445 
446  c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
447  c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
448  c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
449  c->current_mv_penalty = c->mv_penalty[enc->m.f_code=1] + MAX_DMV;
450 
451  c->xmin = - x*block_w - 16+3;
452  c->ymin = - y*block_w - 16+3;
453  c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
454  c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
455 
456  c->xmin = FFMAX(c->xmin,-range);
457  c->xmax = FFMIN(c->xmax, range);
458  c->ymin = FFMAX(c->ymin,-range);
459  c->ymax = FFMIN(c->ymax, range);
460 
461  if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
462  if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
463  if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
464  if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
465  if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
466  if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
467  if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
468 
469  P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
470  P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
471 
472  if (!y) {
473  c->pred_x= P_LEFT[0];
474  c->pred_y= P_LEFT[1];
475  } else {
476  c->pred_x = P_MEDIAN[0];
477  c->pred_y = P_MEDIAN[1];
478  }
479 
480  score= INT_MAX;
481  best_ref= 0;
482  for(ref=0; ref<s->ref_frames; ref++){
483  init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
484 
485  ref_score= ff_epzs_motion_search(&enc->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
486  (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
487 
488  av_assert2(ref_mx >= c->xmin);
489  av_assert2(ref_mx <= c->xmax);
490  av_assert2(ref_my >= c->ymin);
491  av_assert2(ref_my <= c->ymax);
492 
493  ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
494  ref_score= ff_get_mb_score(&enc->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
495  ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
496  if(s->ref_mvs[ref]){
497  s->ref_mvs[ref][index][0]= ref_mx;
498  s->ref_mvs[ref][index][1]= ref_my;
499  s->ref_scores[ref][index]= ref_score;
500  }
501  if(score > ref_score){
502  score= ref_score;
503  best_ref= ref;
504  mx= ref_mx;
505  my= ref_my;
506  }
507  }
508  //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2
509 
510  // subpel search
511  base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
512  pc= s->c;
513  pc.bytestream_start=
514  pc.bytestream= p_buffer; //FIXME end/start? and at the other side too
515  memcpy(p_state, s->block_state, sizeof(s->block_state));
516 
517  if(level!=s->block_max_depth)
518  put_rac(&pc, &p_state[4 + s_context], 1);
519  put_rac(&pc, &p_state[1 + left->type + top->type], 0);
520  if(s->ref_frames > 1)
521  put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
522  pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
523  put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
524  put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
525  p_len= pc.bytestream - pc.bytestream_start;
526  score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;
527 
528  block_s= block_w*block_w;
529  sum = pix_sum(current_data[0], stride, block_w, block_w);
530  l= (sum + block_s/2)/block_s;
531  iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
532 
533  if (s->nb_planes > 2) {
534  block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
535  sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
536  cb= (sum + block_s/2)/block_s;
537  // iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
538  sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
539  cr= (sum + block_s/2)/block_s;
540  // iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
541  }else
542  cb = cr = 0;
543 
544  ic= s->c;
545  ic.bytestream_start=
546  ic.bytestream= i_buffer; //FIXME end/start? and at the other side too
547  memcpy(i_state, s->block_state, sizeof(s->block_state));
548  if(level!=s->block_max_depth)
549  put_rac(&ic, &i_state[4 + s_context], 1);
550  put_rac(&ic, &i_state[1 + left->type + top->type], 1);
551  put_symbol(&ic, &i_state[32], l-pl , 1);
552  if (s->nb_planes > 2) {
553  put_symbol(&ic, &i_state[64], cb-pcb, 1);
554  put_symbol(&ic, &i_state[96], cr-pcr, 1);
555  }
556  i_len= ic.bytestream - ic.bytestream_start;
557  iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;
558 
559  av_assert1(iscore < 255*255*256 + enc->lambda2*10);
560  av_assert1(iscore >= 0);
561  av_assert1(l>=0 && l<=255);
562  av_assert1(pl>=0 && pl<=255);
563 
564  if(level==0){
565  int varc= iscore >> 8;
566  int vard= score >> 8;
567  if (vard <= 64 || vard < varc)
568  c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
569  else
570  c->scene_change_score += enc->m.qscale;
571  }
572 
573  if(level!=s->block_max_depth){
574  put_rac(&s->c, &s->block_state[4 + s_context], 0);
575  score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
576  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
577  score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
578  score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
579  score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead
580 
581  if(score2 < score && score2 < iscore)
582  return score2;
583  }
584 
585  if(iscore < score){
586  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
587  memcpy(pbbak, i_buffer, i_len);
588  s->c= ic;
589  s->c.bytestream_start= pbbak_start;
590  s->c.bytestream= pbbak + i_len;
591  set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
592  memcpy(s->block_state, i_state, sizeof(s->block_state));
593  return iscore;
594  }else{
595  memcpy(pbbak, p_buffer, p_len);
596  s->c= pc;
597  s->c.bytestream_start= pbbak_start;
598  s->c.bytestream= pbbak + p_len;
599  set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
600  memcpy(s->block_state, p_state, sizeof(s->block_state));
601  return score;
602  }
603 }
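/* encode_q_branch() above scores three alternatives for the block at
 * (x, y, level): an inter candidate found by EPZS plus subpel search over all
 * reference frames, an intra (flat colour) candidate, and a split into four
 * children (recursive call). The inter and intra candidates are tentatively
 * range-coded into p_buffer/i_buffer so their exact rate is known; the
 * cheapest option is kept and its coder state is copied back into s->c. */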
604 
605 static void encode_q_branch2(SnowContext *s, int level, int x, int y){
606  const int w= s->b_width << s->block_max_depth;
607  const int rem_depth= s->block_max_depth - level;
608  const int index= (x + y*w) << rem_depth;
609  int trx= (x+1)<<rem_depth;
610  BlockNode *b= &s->block[index];
611  const BlockNode *left = x ? &s->block[index-1] : &null_block;
612  const BlockNode *top = y ? &s->block[index-w] : &null_block;
613  const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
614  const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
615  int pl = left->color[0];
616  int pcb= left->color[1];
617  int pcr= left->color[2];
618  int pmx, pmy;
619  int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
620  int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
621  int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
622  int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
623 
624  if(s->keyframe){
625  set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
626  return;
627  }
628 
629  if(level!=s->block_max_depth){
630  if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
631  put_rac(&s->c, &s->block_state[4 + s_context], 1);
632  }else{
633  put_rac(&s->c, &s->block_state[4 + s_context], 0);
634  encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
635  encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
636  encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
637  encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
638  return;
639  }
640  }
641  if(b->type & BLOCK_INTRA){
642  pred_mv(s, &pmx, &pmy, 0, left, top, tr);
643  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
644  put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
645  if (s->nb_planes > 2) {
646  put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
647  put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
648  }
649  set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
650  }else{
651  pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
652  put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
653  if(s->ref_frames > 1)
654  put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
655  put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
656  put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
657  set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
658  }
659 }
660 
661 static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
662 {
663  SnowContext *const s = &enc->com;
664  int i, x2, y2;
665  Plane *p= &s->plane[plane_index];
666  const int block_size = MB_SIZE >> s->block_max_depth;
667  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
668  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
669  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
670  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
671  const int ref_stride= s->current_picture->linesize[plane_index];
672  const uint8_t *src = s->input_picture->data[plane_index];
673  IDWTELEM *dst= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
674  const int b_stride = s->b_width << s->block_max_depth;
675  const int w= p->width;
676  const int h= p->height;
677  int index= mb_x + mb_y*b_stride;
678  BlockNode *b= &s->block[index];
679  BlockNode backup= *b;
680  int ab=0;
681  int aa=0;
682 
683  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above
684 
685  b->type|= BLOCK_INTRA;
686  b->color[plane_index]= 0;
687  memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
688 
689  for(i=0; i<4; i++){
690  int mb_x2= mb_x + (i &1) - 1;
691  int mb_y2= mb_y + (i>>1) - 1;
692  int x= block_w*mb_x2 + block_w/2;
693  int y= block_h*mb_y2 + block_h/2;
694 
695  add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
696  x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
697 
698  for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
699  for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
700  int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
701  int obmc_v= obmc[index];
702  int d;
703  if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
704  if(x<0) obmc_v += obmc[index + block_w];
705  if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
706  if(x+block_w>w) obmc_v += obmc[index - block_w];
707  //FIXME precalculate this or simplify it somehow else
708 
709  d = -dst[index] + (1<<(FRAC_BITS-1));
710  dst[index] = d;
711  ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
712  aa += obmc_v * obmc_v; //FIXME precalculate this
713  }
714  }
715  }
716  *b= backup;
717 
718  return av_clip_uint8( ROUNDED_DIV((int64_t)ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
719 }
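/* get_dc() above computes a weighted least-squares estimate of the intra DC
 * for one block: with the block forced to BLOCK_INTRA and colour 0,
 * add_yblock() (add = 0) leaves the neighbouring blocks' OBMC contribution in
 * dst, which the sign flip into d turns back into a per-pixel prediction, so
 * ab accumulates obmc * (source - prediction) and aa the squared OBMC weights;
 * the result ab * OBMC_MAX / aa is clipped to 8 bits. */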
720 
721 static inline int get_block_bits(SnowContext *s, int x, int y, int w){
722  const int b_stride = s->b_width << s->block_max_depth;
723  const int b_height = s->b_height<< s->block_max_depth;
724  int index= x + y*b_stride;
725  const BlockNode *b = &s->block[index];
726  const BlockNode *left = x ? &s->block[index-1] : &null_block;
727  const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
728  const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
729  const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
730  int dmx, dmy;
731 // int mx_context= av_log2(2*FFABS(left->mx - top->mx));
732 // int my_context= av_log2(2*FFABS(left->my - top->my));
733 
734  if(x<0 || x>=b_stride || y>=b_height)
735  return 0;
736 /*
737 1 0 0
738 01X 1-2 1
739 001XX 3-6 2-3
740 0001XXX 7-14 4-7
741 00001XXXX 15-30 8-15
742 */
743 //FIXME try accurate rate
744 //FIXME intra and inter predictors if surrounding blocks are not the same type
745  if(b->type & BLOCK_INTRA){
746  return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
747  + av_log2(2*FFABS(left->color[1] - b->color[1]))
748  + av_log2(2*FFABS(left->color[2] - b->color[2])));
749  }else{
750  pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
751  dmx-= b->mx;
752  dmy-= b->my;
753  return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
754  + av_log2(2*FFABS(dmy))
755  + av_log2(2*b->ref));
756  }
757 }
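/* get_block_bits() above is a cheap bit-count model matching put_symbol(): a
 * value v costs roughly 2*log2(2*|v|) + 1 bits, so intra blocks are charged
 * for their colour deltas against the left neighbour and inter blocks for
 * their MV residual (against pred_mv) and reference index. */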
758 
759 static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
760  int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
761 {
762  SnowContext *const s = &enc->com;
763  Plane *p= &s->plane[plane_index];
764  const int block_size = MB_SIZE >> s->block_max_depth;
765  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
766  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
767  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
768  const int ref_stride= s->current_picture->linesize[plane_index];
769  uint8_t *dst= s->current_picture->data[plane_index];
770  const uint8_t *src = s->input_picture->data[plane_index];
771  IDWTELEM *pred= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
772  uint8_t *cur = s->scratchbuf;
773  uint8_t *tmp = s->emu_edge_buffer;
774  const int b_stride = s->b_width << s->block_max_depth;
775  const int b_height = s->b_height<< s->block_max_depth;
776  const int w= p->width;
777  const int h= p->height;
778  int distortion;
779  int rate= 0;
780  const int penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
781  int sx= block_w*mb_x - block_w/2;
782  int sy= block_h*mb_y - block_h/2;
783  int x0= FFMAX(0,-sx);
784  int y0= FFMAX(0,-sy);
785  int x1= FFMIN(block_w*2, w-sx);
786  int y1= FFMIN(block_h*2, h-sy);
787  int i,x,y;
788 
789  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below, checking only block_w
790 
791  ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
792 
793  for(y=y0; y<y1; y++){
794  const uint8_t *obmc1= obmc_edged[y];
795  const IDWTELEM *pred1 = pred + y*obmc_stride;
796  uint8_t *cur1 = cur + y*ref_stride;
797  uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
798  for(x=x0; x<x1; x++){
799 #if FRAC_BITS >= LOG2_OBMC_MAX
800  int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
801 #else
802  int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
803 #endif
804  v = (v + pred1[x]) >> FRAC_BITS;
805  if(v&(~255)) v= ~(v>>31);
806  dst1[x] = v;
807  }
808  }
809 
810  /* copy the regions where obmc[] = (uint8_t)256 */
811  if(LOG2_OBMC_MAX == 8
812  && (mb_x == 0 || mb_x == b_stride-1)
813  && (mb_y == 0 || mb_y == b_height-1)){
814  if(mb_x == 0)
815  x1 = block_w;
816  else
817  x0 = block_w;
818  if(mb_y == 0)
819  y1 = block_h;
820  else
821  y0 = block_h;
822  for(y=y0; y<y1; y++)
823  memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
824  }
825 
826  if(block_w==16){
827  /* FIXME rearrange dsputil to fit 32x32 cmp functions */
828  /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
829  /* FIXME cmps overlap but do not cover the wavelet's whole support.
830  * So improving the score of one block is not strictly guaranteed
831  * to improve the score of the whole frame, thus iterative motion
832  * estimation does not always converge. */
833  if(s->avctx->me_cmp == FF_CMP_W97)
834  distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
835  else if(s->avctx->me_cmp == FF_CMP_W53)
836  distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
837  else{
838  distortion = 0;
839  for(i=0; i<4; i++){
840  int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
841  distortion += enc->m.me.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
842  }
843  }
844  }else{
845  av_assert2(block_w==8);
846  distortion = enc->m.me.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
847  }
848 
849  if(plane_index==0){
850  for(i=0; i<4; i++){
851 /* ..RRr
852  * .RXx.
853  * rxx..
854  */
855  rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
856  }
857  if(mb_x == b_stride-2)
858  rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
859  }
860  return distortion + rate*penalty_factor;
861 }
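/* get_block_rd() above reconstructs the doubled (OBMC support) area of one
 * block into the current picture: the motion-compensated prediction from
 * ff_snow_pred_block() is blended, via the obmc_edged weights, with the
 * surrounding blocks' contribution precomputed into obmc_scratchpad, then
 * distortion is measured with the configured cmp function (wavelet cmp for
 * 16-pixel blocks) and the estimated side-info bits times the penalty factor
 * are added. */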
862 
863 static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
864 {
865  SnowContext *const s = &enc->com;
866  int i, y2;
867  Plane *p= &s->plane[plane_index];
868  const int block_size = MB_SIZE >> s->block_max_depth;
869  const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
870  const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
871  const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
872  const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
873  const int ref_stride= s->current_picture->linesize[plane_index];
874  uint8_t *dst= s->current_picture->data[plane_index];
875  const uint8_t *src = s->input_picture->data[plane_index];
876  //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst)
877  // const has only been removed from zero_dst to suppress a warning
878  static IDWTELEM zero_dst[4096]; //FIXME
879  const int b_stride = s->b_width << s->block_max_depth;
880  const int w= p->width;
881  const int h= p->height;
882  int distortion= 0;
883  int rate= 0;
884  const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
885 
886  av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below
887 
888  for(i=0; i<9; i++){
889  int mb_x2= mb_x + (i%3) - 1;
890  int mb_y2= mb_y + (i/3) - 1;
891  int x= block_w*mb_x2 + block_w/2;
892  int y= block_h*mb_y2 + block_h/2;
893 
894  add_yblock(s, 0, NULL, zero_dst, dst, obmc,
895  x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
896 
897  //FIXME find a cleaner/simpler way to skip the outside stuff
898  for(y2= y; y2<0; y2++)
899  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
900  for(y2= h; y2<y+block_h; y2++)
901  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
902  if(x<0){
903  for(y2= y; y2<y+block_h; y2++)
904  memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
905  }
906  if(x+block_w > w){
907  for(y2= y; y2<y+block_h; y2++)
908  memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
909  }
910 
911  av_assert1(block_w== 8 || block_w==16);
912  distortion += enc->m.me.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
913  }
914 
915  if(plane_index==0){
916  BlockNode *b= &s->block[mb_x+mb_y*b_stride];
917  int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);
918 
919 /* ..RRRr
920  * .RXXx.
921  * .RXXx.
922  * rxxx.
923  */
924  if(merged)
925  rate = get_block_bits(s, mb_x, mb_y, 2);
926  for(i=merged?4:0; i<9; i++){
927  static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
928  rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
929  }
930  }
931  return distortion + rate*penalty_factor;
932 }
933 
934 static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
935  const int w= b->width;
936  const int h= b->height;
937  int x, y;
938 
939  if(1){
940  int run=0;
941  int *runs = s->run_buffer;
942  int run_index=0;
943  int max_index;
944 
945  for(y=0; y<h; y++){
946  for(x=0; x<w; x++){
947  int v, p=0;
948  int /*ll=0, */l=0, lt=0, t=0, rt=0;
949  v= src[x + y*stride];
950 
951  if(y){
952  t= src[x + (y-1)*stride];
953  if(x){
954  lt= src[x - 1 + (y-1)*stride];
955  }
956  if(x + 1 < w){
957  rt= src[x + 1 + (y-1)*stride];
958  }
959  }
960  if(x){
961  l= src[x - 1 + y*stride];
962  /*if(x > 1){
963  if(orientation==1) ll= src[y + (x-2)*stride];
964  else ll= src[x - 2 + y*stride];
965  }*/
966  }
967  if(parent){
968  int px= x>>1;
969  int py= y>>1;
970  if(px<b->parent->width && py<b->parent->height)
971  p= parent[px + py*2*stride];
972  }
973  if(!(/*ll|*/l|lt|t|rt|p)){
974  if(v){
975  runs[run_index++]= run;
976  run=0;
977  }else{
978  run++;
979  }
980  }
981  }
982  }
983  max_index= run_index;
984  runs[run_index++]= run;
985  run_index=0;
986  run= runs[run_index++];
987 
988  put_symbol2(&s->c, b->state[30], max_index, 0);
989  if(run_index <= max_index)
990  put_symbol2(&s->c, b->state[1], run, 3);
991 
992  for(y=0; y<h; y++){
993  if(s->c.bytestream_end - s->c.bytestream < w*40){
994  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
995  return AVERROR(ENOMEM);
996  }
997  for(x=0; x<w; x++){
998  int v, p=0;
999  int /*ll=0, */l=0, lt=0, t=0, rt=0;
1000  v= src[x + y*stride];
1001 
1002  if(y){
1003  t= src[x + (y-1)*stride];
1004  if(x){
1005  lt= src[x - 1 + (y-1)*stride];
1006  }
1007  if(x + 1 < w){
1008  rt= src[x + 1 + (y-1)*stride];
1009  }
1010  }
1011  if(x){
1012  l= src[x - 1 + y*stride];
1013  /*if(x > 1){
1014  if(orientation==1) ll= src[y + (x-2)*stride];
1015  else ll= src[x - 2 + y*stride];
1016  }*/
1017  }
1018  if(parent){
1019  int px= x>>1;
1020  int py= y>>1;
1021  if(px<b->parent->width && py<b->parent->height)
1022  p= parent[px + py*2*stride];
1023  }
1024  if(/*ll|*/l|lt|t|rt|p){
1025  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1026 
1027  put_rac(&s->c, &b->state[0][context], !!v);
1028  }else{
1029  if(!run){
1030  run= runs[run_index++];
1031 
1032  if(run_index <= max_index)
1033  put_symbol2(&s->c, b->state[1], run, 3);
1034  av_assert2(v);
1035  }else{
1036  run--;
1037  av_assert2(!v);
1038  }
1039  }
1040  if(v){
1041  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1042  int l2= 2*FFABS(l) + (l<0);
1043  int t2= 2*FFABS(t) + (t<0);
1044 
1045  put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
1046  put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
1047  }
1048  }
1049  }
1050  }
1051  return 0;
1052 }
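/* encode_subband_c0run() makes two passes over the subband: the first only
 * collects the lengths of zero runs in the "all causal neighbours and parent
 * are zero" state, the second writes, per coefficient, a significance bit
 * (context derived from l, lt, t, rt and the parent), the stored run lengths
 * where no context is available, and for non-zero values the magnitude via
 * put_symbol2() plus a context-coded sign. */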
1053 
1054 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1055 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1056 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1057  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1058 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1059 }
1060 
1061 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1062  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1063 {
1064  SnowContext *const s = &enc->com;
1065  const int b_stride= s->b_width << s->block_max_depth;
1066  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1067  BlockNode backup= *block;
1068  int rd;
1069 
1070  av_assert2(mb_x>=0 && mb_y>=0);
1071  av_assert2(mb_x<b_stride);
1072 
1073  block->color[0] = p[0];
1074  block->color[1] = p[1];
1075  block->color[2] = p[2];
1076  block->type |= BLOCK_INTRA;
1077 
1078  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1079 
1080 //FIXME chroma
1081  if(rd < *best_rd){
1082  *best_rd= rd;
1083  return 1;
1084  }else{
1085  *block= backup;
1086  return 0;
1087  }
1088 }
1089 
1090 /* special case for int[2] args we discard afterwards,
1091  * fixes compilation problem with gcc 2.95 */
1092 static av_always_inline int check_block_inter(SnowEncContext *enc,
1093  int mb_x, int mb_y, int p0, int p1,
1094  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1095 {
1096  SnowContext *const s = &enc->com;
1097  const int b_stride = s->b_width << s->block_max_depth;
1098  BlockNode *block = &s->block[mb_x + mb_y * b_stride];
1099  BlockNode backup = *block;
1100  unsigned value;
1101  int rd, index;
1102 
1103  av_assert2(mb_x >= 0 && mb_y >= 0);
1104  av_assert2(mb_x < b_stride);
1105 
1106  index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
1107  value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
1108  if (enc->me_cache[index] == value)
1109  return 0;
1110  enc->me_cache[index] = value;
1111 
1112  block->mx = p0;
1113  block->my = p1;
1114  block->type &= ~BLOCK_INTRA;
1115 
1116  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);
1117 
1118 //FIXME chroma
1119  if (rd < *best_rd) {
1120  *best_rd = rd;
1121  return 1;
1122  } else {
1123  *block = backup;
1124  return 0;
1125  }
1126 }
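/* The me_cache used by check_block_inter() and check_4block_inter() is a small
 * hash table keyed by the candidate vector ((p0 + 31*p1) mod ME_CACHE_SIZE)
 * and stamped with me_cache_generation, so a candidate that was already scored
 * while searching the current block is skipped instead of re-running
 * get_block_rd()/get_4block_rd(). */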
1127 
1128 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1129  int p0, int p1, int ref, int *best_rd)
1130 {
1131  SnowContext *const s = &enc->com;
1132  const int b_stride= s->b_width << s->block_max_depth;
1133  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1134  BlockNode backup[4];
1135  unsigned value;
1136  int rd, index;
1137 
1138  /* We don't initialize backup[] during variable declaration, because
1139  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1140  * 'int16_t'". */
1141  backup[0] = block[0];
1142  backup[1] = block[1];
1143  backup[2] = block[b_stride];
1144  backup[3] = block[b_stride + 1];
1145 
1146  av_assert2(mb_x>=0 && mb_y>=0);
1147  av_assert2(mb_x<b_stride);
1148  av_assert2(((mb_x|mb_y)&1) == 0);
1149 
1150  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1151  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1152  if (enc->me_cache[index] == value)
1153  return 0;
1154  enc->me_cache[index] = value;
1155 
1156  block->mx= p0;
1157  block->my= p1;
1158  block->ref= ref;
1159  block->type &= ~BLOCK_INTRA;
1160  block[1]= block[b_stride]= block[b_stride+1]= *block;
1161 
1162  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1163 
1164 //FIXME chroma
1165  if(rd < *best_rd){
1166  *best_rd= rd;
1167  return 1;
1168  }else{
1169  block[0]= backup[0];
1170  block[1]= backup[1];
1171  block[b_stride]= backup[2];
1172  block[b_stride+1]= backup[3];
1173  return 0;
1174  }
1175 }
1176 
1177 static void iterative_me(SnowEncContext *enc)
1178 {
1179  SnowContext *const s = &enc->com;
1180  int pass, mb_x, mb_y;
1181  const int b_width = s->b_width << s->block_max_depth;
1182  const int b_height= s->b_height << s->block_max_depth;
1183  const int b_stride= b_width;
1184  int color[3];
1185 
1186  {
1187  RangeCoder r = s->c;
1188  uint8_t state[sizeof(s->block_state)];
1189  memcpy(state, s->block_state, sizeof(s->block_state));
1190  for(mb_y= 0; mb_y<s->b_height; mb_y++)
1191  for(mb_x= 0; mb_x<s->b_width; mb_x++)
1192  encode_q_branch(enc, 0, mb_x, mb_y);
1193  s->c = r;
1194  memcpy(s->block_state, state, sizeof(s->block_state));
1195  }
1196 
1197  for(pass=0; pass<25; pass++){
1198  int change= 0;
1199 
1200  for(mb_y= 0; mb_y<b_height; mb_y++){
1201  for(mb_x= 0; mb_x<b_width; mb_x++){
1202  int dia_change, i, j, ref;
1203  int best_rd= INT_MAX, ref_rd;
1204  BlockNode backup, ref_b;
1205  const int index= mb_x + mb_y * b_stride;
1206  BlockNode *block= &s->block[index];
1207  BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
1208  BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
1209  BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
1210  BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
1211  BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
1212  BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
1213  BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
1214  BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
1215  const int b_w= (MB_SIZE >> s->block_max_depth);
1216  uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
1217 
1218  if(pass && (block->type & BLOCK_OPT))
1219  continue;
1220  block->type |= BLOCK_OPT;
1221 
1222  backup= *block;
1223 
1224  if (!enc->me_cache_generation)
1225  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1226  enc->me_cache_generation += 1<<22;
1227 
1228  //FIXME precalculate
1229  {
1230  int x, y;
1231  for (y = 0; y < b_w * 2; y++)
1232  memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1233  if(mb_x==0)
1234  for(y=0; y<b_w*2; y++)
1235  memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1236  if(mb_x==b_stride-1)
1237  for(y=0; y<b_w*2; y++)
1238  memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1239  if(mb_y==0){
1240  for(x=0; x<b_w*2; x++)
1241  obmc_edged[0][x] += obmc_edged[b_w-1][x];
1242  for(y=1; y<b_w; y++)
1243  memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1244  }
1245  if(mb_y==b_height-1){
1246  for(x=0; x<b_w*2; x++)
1247  obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1248  for(y=b_w; y<b_w*2-1; y++)
1249  memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
1250  }
1251  }
1252 
1253  //skip stuff outside the picture
1254  if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1255  const uint8_t *src = s->input_picture->data[0];
1256  uint8_t *dst= s->current_picture->data[0];
1257  const int stride= s->current_picture->linesize[0];
1258  const int block_w= MB_SIZE >> s->block_max_depth;
1259  const int block_h= MB_SIZE >> s->block_max_depth;
1260  const int sx= block_w*mb_x - block_w/2;
1261  const int sy= block_h*mb_y - block_h/2;
1262  const int w= s->plane[0].width;
1263  const int h= s->plane[0].height;
1264  int y;
1265 
1266  for(y=sy; y<0; y++)
1267  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1268  for(y=h; y<sy+block_h*2; y++)
1269  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1270  if(sx<0){
1271  for(y=sy; y<sy+block_h*2; y++)
1272  memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
1273  }
1274  if(sx+block_w*2 > w){
1275  for(y=sy; y<sy+block_h*2; y++)
1276  memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
1277  }
1278  }
1279 
1280  // intra(black) = neighbors' contribution to the current block
1281  for(i=0; i < s->nb_planes; i++)
1282  color[i]= get_dc(enc, mb_x, mb_y, i);
1283 
1284  // get previous score (cannot be cached due to OBMC)
1285  if(pass > 0 && (block->type&BLOCK_INTRA)){
1286  int color0[3]= {block->color[0], block->color[1], block->color[2]};
1287  check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
1288  }else
1289  check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
1290 
1291  ref_b= *block;
1292  ref_rd= best_rd;
1293  for(ref=0; ref < s->ref_frames; ref++){
1294  int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1295  if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
1296  continue;
1297  block->ref= ref;
1298  best_rd= INT_MAX;
1299 
1300  check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
1301  check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
1302  if(tb)
1303  check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1304  if(lb)
1305  check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1306  if(rb)
1307  check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
1308  if(bb)
1309  check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1310 
1311  /* fullpel ME */
1312  //FIXME avoid subpel interpolation / round to nearest integer
1313  do{
1314  int newx = block->mx;
1315  int newy = block->my;
1316  int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
1317  dia_change=0;
1318  for(i=0; i < dia_size; i++){
1319  for(j=0; j<i; j++){
1320  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1321  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1322  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1323  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
1324  }
1325  }
1326  }while(dia_change);
1327  /* subpel ME */
1328  do{
1329  static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1330  dia_change=0;
1331  for(i=0; i<8; i++)
1332  dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
1333  }while(dia_change);
1334  //FIXME or try the standard 2 pass qpel or similar
1335 
1336  mvr[0][0]= block->mx;
1337  mvr[0][1]= block->my;
1338  if(ref_rd > best_rd){
1339  ref_rd= best_rd;
1340  ref_b= *block;
1341  }
1342  }
1343  best_rd= ref_rd;
1344  *block= ref_b;
1345  check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
1346  //FIXME RD style color selection
1347  if(!same_block(block, &backup)){
1348  if(tb ) tb ->type &= ~BLOCK_OPT;
1349  if(lb ) lb ->type &= ~BLOCK_OPT;
1350  if(rb ) rb ->type &= ~BLOCK_OPT;
1351  if(bb ) bb ->type &= ~BLOCK_OPT;
1352  if(tlb) tlb->type &= ~BLOCK_OPT;
1353  if(trb) trb->type &= ~BLOCK_OPT;
1354  if(blb) blb->type &= ~BLOCK_OPT;
1355  if(brb) brb->type &= ~BLOCK_OPT;
1356  change ++;
1357  }
1358  }
1359  }
1360  av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
1361  if(!change)
1362  break;
1363  }
1364 
1365  if(s->block_max_depth == 1){
1366  int change= 0;
1367  for(mb_y= 0; mb_y<b_height; mb_y+=2){
1368  for(mb_x= 0; mb_x<b_width; mb_x+=2){
1369  int i;
1370  int best_rd, init_rd;
1371  const int index= mb_x + mb_y * b_stride;
1372  BlockNode *b[4];
1373 
1374  b[0]= &s->block[index];
1375  b[1]= b[0]+1;
1376  b[2]= b[0]+b_stride;
1377  b[3]= b[2]+1;
1378  if(same_block(b[0], b[1]) &&
1379  same_block(b[0], b[2]) &&
1380  same_block(b[0], b[3]))
1381  continue;
1382 
1383  if (!enc->me_cache_generation)
1384  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1385  enc->me_cache_generation += 1<<22;
1386 
1387  init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);
1388 
1389  //FIXME more multiref search?
1390  check_4block_inter(enc, mb_x, mb_y,
1391  (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1392  (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1393 
1394  for(i=0; i<4; i++)
1395  if(!(b[i]->type&BLOCK_INTRA))
1396  check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
1397 
1398  if(init_rd != best_rd)
1399  change++;
1400  }
1401  }
1402  av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
1403  }
1404 }
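/* iterative_me() proceeds in three stages: an initial EPZS search per block
 * via encode_q_branch() (with the range coder and block_state restored
 * afterwards, so nothing is actually written), then up to 25 rate-distortion
 * refinement passes per block (intra DC via get_dc(), inter candidates from
 * neighbours and stored reference MVs, a fullpel diamond search, a subpel
 * square search and a final intra check, with BLOCK_OPT marking converged
 * blocks), and finally, when block_max_depth == 1, a pass that tries merging
 * each 2x2 group of blocks into a single motion vector. */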
1405 
1406 static void encode_blocks(SnowEncContext *enc, int search)
1407 {
1408  SnowContext *const s = &enc->com;
1409  int x, y;
1410  int w= s->b_width;
1411  int h= s->b_height;
1412 
1413  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1414  iterative_me(enc);
1415 
1416  for(y=0; y<h; y++){
1417  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1418  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1419  return;
1420  }
1421  for(x=0; x<w; x++){
1422  if (enc->motion_est == FF_ME_ITER || !search)
1423  encode_q_branch2(s, 0, x, y);
1424  else
1425  encode_q_branch (enc, 0, x, y);
1426  }
1427  }
1428 }
1429 
1430 static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
1431  const int w= b->width;
1432  const int h= b->height;
1433  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1434  const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
1435  int x,y, thres1, thres2;
1436 
1437  if(s->qlog == LOSSLESS_QLOG){
1438  for(y=0; y<h; y++)
1439  for(x=0; x<w; x++)
1440  dst[x + y*stride]= src[x + y*stride];
1441  return;
1442  }
1443 
1444  bias= bias ? 0 : (3*qmul)>>3;
1445  thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
1446  thres2= 2*thres1;
1447 
1448  if(!bias){
1449  for(y=0; y<h; y++){
1450  for(x=0; x<w; x++){
1451  int i= src[x + y*stride];
1452 
1453  if((unsigned)(i+thres1) > thres2){
1454  if(i>=0){
1455  i<<= QEXPSHIFT;
1456  i/= qmul; //FIXME optimize
1457  dst[x + y*stride]= i;
1458  }else{
1459  i= -i;
1460  i<<= QEXPSHIFT;
1461  i/= qmul; //FIXME optimize
1462  dst[x + y*stride]= -i;
1463  }
1464  }else
1465  dst[x + y*stride]= 0;
1466  }
1467  }
1468  }else{
1469  for(y=0; y<h; y++){
1470  for(x=0; x<w; x++){
1471  int i= src[x + y*stride];
1472 
1473  if((unsigned)(i+thres1) > thres2){
1474  if(i>=0){
1475  i<<= QEXPSHIFT;
1476  i= (i + bias) / qmul; //FIXME optimize
1477  dst[x + y*stride]= i;
1478  }else{
1479  i= -i;
1480  i<<= QEXPSHIFT;
1481  i= (i + bias) / qmul; //FIXME optimize
1482  dst[x + y*stride]= -i;
1483  }
1484  }else
1485  dst[x + y*stride]= 0;
1486  }
1487  }
1488  }
1489 }
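/* quantize() above implements a dead-zone quantizer: qmul is an exponential
 * step derived from qlog (QROOT steps per doubling), coefficients with
 * |coeff| <= thres1 are forced to zero, and the rest are divided by qmul with
 * either no rounding bias or a 3/8*qmul bias depending on the bias argument;
 * LOSSLESS_QLOG bypasses quantization entirely. */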
1490 
1491 static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride){
1492  const int w= b->width;
1493  const int h= b->height;
1494  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1495  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1496  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
1497  int x,y;
1498 
1499  if(s->qlog == LOSSLESS_QLOG) return;
1500 
1501  for(y=0; y<h; y++){
1502  for(x=0; x<w; x++){
1503  int i= src[x + y*stride];
1504  if(i<0){
1505  src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
1506  }else if(i>0){
1507  src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
1508  }
1509  }
1510  }
1511 }
1512 
1513 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1514  const int w= b->width;
1515  const int h= b->height;
1516  int x,y;
1517 
1518  for(y=h-1; y>=0; y--){
1519  for(x=w-1; x>=0; x--){
1520  int i= x + y*stride;
1521 
1522  if(x){
1523  if(use_median){
1524  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1525  else src[i] -= src[i - 1];
1526  }else{
1527  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1528  else src[i] -= src[i - 1];
1529  }
1530  }else{
1531  if(y) src[i] -= src[i - stride];
1532  }
1533  }
1534  }
1535 }
1536 
1537 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1538  const int w= b->width;
1539  const int h= b->height;
1540  int x,y;
1541 
1542  for(y=0; y<h; y++){
1543  for(x=0; x<w; x++){
1544  int i= x + y*stride;
1545 
1546  if(x){
1547  if(use_median){
1548  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1549  else src[i] += src[i - 1];
1550  }else{
1551  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1552  else src[i] += src[i - 1];
1553  }
1554  }else{
1555  if(y) src[i] += src[i - stride];
1556  }
1557  }
1558  }
1559 }
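/* decorrelate()/correlate() above subtract and re-add a per-sample DPCM
 * prediction on a subband: the left sample, the top sample, the median of
 * left/top/top-right, or the LOCO-I style gradient left + top - top-left,
 * depending on use_median and position. The encoder applies decorrelate() to
 * the orientation-0 (DC) band before entropy coding (see ratecontrol_1pass()
 * below) and correlate() is the inverse used to restore it. */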
1560 
1561 static void encode_qlogs(SnowContext *s){
1562  int plane_index, level, orientation;
1563 
1564  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1565  for(level=0; level<s->spatial_decomposition_count; level++){
1566  for(orientation=level ? 1:0; orientation<4; orientation++){
1567  if(orientation==2) continue;
1568  put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1569  }
1570  }
1571  }
1572 }
1573 
1574 static void encode_header(SnowContext *s){
1575  int plane_index, i;
1576  uint8_t kstate[32];
1577 
1578  memset(kstate, MID_STATE, sizeof(kstate));
1579 
1580  put_rac(&s->c, kstate, s->keyframe);
1581  if(s->keyframe || s->always_reset){
1583  s->last_spatial_decomposition_type=
1584  s->last_qlog=
1585  s->last_qbias=
1586  s->last_mv_scale=
1587  s->last_block_max_depth= 0;
1588  for(plane_index=0; plane_index<2; plane_index++){
1589  Plane *p= &s->plane[plane_index];
1590  p->last_htaps=0;
1591  p->last_diag_mc=0;
1592  memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
1593  }
1594  }
1595  if(s->keyframe){
1596  put_symbol(&s->c, s->header_state, s->version, 0);
1597  put_rac(&s->c, s->header_state, s->always_reset);
1598  put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1599  put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1600  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1601  put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
1602  if (s->nb_planes > 2) {
1603  put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
1604  put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
1605  }
1606  put_rac(&s->c, s->header_state, s->spatial_scalability);
1607 // put_rac(&s->c, s->header_state, s->rate_scalability);
1608  put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
1609 
1610  encode_qlogs(s);
1611  }
1612 
1613  if(!s->keyframe){
1614  int update_mc=0;
1615  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1616  Plane *p= &s->plane[plane_index];
1617  update_mc |= p->last_htaps != p->htaps;
1618  update_mc |= p->last_diag_mc != p->diag_mc;
1619  update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1620  }
1621  put_rac(&s->c, s->header_state, update_mc);
1622  if(update_mc){
1623  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1624  Plane *p= &s->plane[plane_index];
1625  put_rac(&s->c, s->header_state, p->diag_mc);
1626  put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
1627  for(i= p->htaps/2; i; i--)
1628  put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
1629  }
1630  }
1631  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1632  put_rac(&s->c, s->header_state, 1);
1633  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1634  encode_qlogs(s);
1635  }else
1636  put_rac(&s->c, s->header_state, 0);
1637  }
1638 
1639  put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1640  put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
1641  put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1642  put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
1643  put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1644 
1645 }
1646 
1647 static void update_last_header_values(SnowContext *s){
1648  int plane_index;
1649 
1650  if(!s->keyframe){
1651  for(plane_index=0; plane_index<2; plane_index++){
1652  Plane *p= &s->plane[plane_index];
1653  p->last_diag_mc= p->diag_mc;
1654  p->last_htaps = p->htaps;
1655  memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1656  }
1657  }
1658 
1659  s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1660  s->last_qlog = s->qlog;
1661  s->last_qbias = s->qbias;
1662  s->last_mv_scale = s->mv_scale;
1663  s->last_block_max_depth = s->block_max_depth;
1664  s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1665 }
1666 
1667 static int qscale2qlog(int qscale){
1668  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1669  + 61*QROOT/8; ///< 64 > 60
1670 }
1671 
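/* One-pass rate control: the quantizer-weighted sum of the luma DWT
 * coefficients is fed to the mpegvideo rate controller as an MB-variance-like
 * complexity measure, and the suggested qscale is converted back into a qlog delta. */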
1672 static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
1673 {
1674  SnowContext *const s = &enc->com;
1675  /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
1676  * FIXME we know exact mv bits at this point,
1677  * but ratecontrol isn't set up to include them. */
1678  uint32_t coef_sum= 0;
1679  int level, orientation, delta_qlog;
1680 
1681  for(level=0; level<s->spatial_decomposition_count; level++){
1682  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1683  SubBand *b= &s->plane[0].band[level][orientation];
1684  IDWTELEM *buf= b->ibuf;
1685  const int w= b->width;
1686  const int h= b->height;
1687  const int stride= b->stride;
1688  const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
1689  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1690  const int qdiv= (1<<16)/qmul;
1691  int x, y;
1692  //FIXME this is ugly
1693  for(y=0; y<h; y++)
1694  for(x=0; x<w; x++)
1695  buf[x+y*stride]= b->buf[x+y*stride];
1696  if(orientation==0)
1697  decorrelate(s, b, buf, stride, 1, 0);
1698  for(y=0; y<h; y++)
1699  for(x=0; x<w; x++)
1700  coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1701  }
1702  }
1703 
1704  /* ugly, ratecontrol just takes a sqrt again */
1705  av_assert0(coef_sum < INT_MAX);
1706  coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1707 
1708  if(pict->pict_type == AV_PICTURE_TYPE_I){
1709  enc->m.mb_var_sum = coef_sum;
1710  enc->m.mc_mb_var_sum = 0;
1711  }else{
1712  enc->m.mc_mb_var_sum = coef_sum;
1713  enc->m.mb_var_sum = 0;
1714  }
1715 
1716  pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
1717  if (pict->quality < 0)
1718  return INT_MIN;
1719  enc->lambda= pict->quality * 3/2;
1720  delta_qlog= qscale2qlog(pict->quality) - s->qlog;
1721  s->qlog+= delta_qlog;
1722  return delta_qlog;
1723 }
1724 
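/* Per-subband visual weighting: a single impulse is placed in each band, the
 * inverse DWT is run, and the energy of the resulting basis function sets that
 * band's qlog; bands 1 and 2 of each level end up with the same weight. */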
1725 static void calculate_visual_weight(SnowContext *s, Plane *p){
1726  int width = p->width;
1727  int height= p->height;
1728  int level, orientation, x, y;
1729 
1730  for(level=0; level<s->spatial_decomposition_count; level++){
1731  int64_t error=0;
1732  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1733  SubBand *b= &p->band[level][orientation];
1734  IDWTELEM *ibuf= b->ibuf;
1735 
1736  memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1737  ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1738  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
1739  for(y=0; y<height; y++){
1740  for(x=0; x<width; x++){
1741  int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1742  error += d*d;
1743  }
1744  }
1745  if (orientation == 2)
1746  error /= 2;
1747  b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
1748  if (orientation != 1)
1749  error = 0;
1750  }
1751  p->band[level][1].qlog = p->band[level][2].qlog;
1752  }
1753 }
1754 
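/* Per-frame entry point: pad and copy the input, pick the frame type, run
 * motion estimation/compensation on the block tree, transform and code each
 * plane, and reconstruct the frame that will serve as the next reference. */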
1755 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1756  const AVFrame *pict, int *got_packet)
1757 {
1758  SnowEncContext *const enc = avctx->priv_data;
1759  SnowContext *const s = &enc->com;
1760  MpegEncContext *const mpv = &enc->m;
1761  RangeCoder * const c= &s->c;
1762  AVCodecInternal *avci = avctx->internal;
1763  AVFrame *pic;
1764  const int width= s->avctx->width;
1765  const int height= s->avctx->height;
1766  int level, orientation, plane_index, i, y, ret;
1767  uint8_t rc_header_bak[sizeof(s->header_state)];
1768  uint8_t rc_block_bak[sizeof(s->block_state)];
1769 
1770  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1771  return ret;
1772 
1773  ff_init_range_encoder(c, pkt->data, pkt->size);
1774  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1775 
1776  for(i=0; i < s->nb_planes; i++){
1777  int hshift= i ? s->chroma_h_shift : 0;
1778  int vshift= i ? s->chroma_v_shift : 0;
1779  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1780  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1781  &pict->data[i][y * pict->linesize[i]],
1782  AV_CEIL_RSHIFT(width, hshift));
1783  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1784  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1785  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1786  EDGE_TOP | EDGE_BOTTOM);
1787 
1788  }
1789  emms_c();
1790  pic = s->input_picture;
1791  pic->pict_type = pict->pict_type;
1792  pic->quality = pict->quality;
1793 
1794  mpv->picture_number = avctx->frame_num;
1795  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1796  mpv->pict_type = pic->pict_type = mpv->rc_context.entry[avctx->frame_num].new_pict_type;
1797  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1798  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1799  pic->quality = ff_rate_estimate_qscale(mpv, 0);
1800  if (pic->quality < 0)
1801  return -1;
1802  }
1803  }else{
1804  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1805  mpv->pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1806  }
1807 
1808  if (enc->pass1_rc && avctx->frame_num == 0)
1809  pic->quality = 2*FF_QP2LAMBDA;
1810  if (pic->quality) {
1811  s->qlog = qscale2qlog(pic->quality);
1812  enc->lambda = pic->quality * 3/2;
1813  }
1814  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1815  s->qlog= LOSSLESS_QLOG;
1816  enc->lambda = 0;
1817  }//else keep previous frame's qlog until after motion estimation
1818 
1819  if (s->current_picture->data[0]) {
1820  int w = s->avctx->width;
1821  int h = s->avctx->height;
1822 
1823  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1824  s->current_picture->linesize[0], w , h ,
1825  EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM);
1826  if (s->current_picture->data[2]) {
1827  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1828  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1829  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1830  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1831  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1832  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1833  }
1834  emms_c();
1835  }
1836 
1837  ff_snow_frames_prepare(s);
1838  ret = get_encode_buffer(s, s->current_picture);
1839  if (ret < 0)
1840  return ret;
1841 
1842  mpv->cur_pic.ptr = &enc->cur_pic;
1843  mpv->cur_pic.ptr->f = s->current_picture;
1844  mpv->cur_pic.ptr->f->pts = pict->pts;
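 /* For P frames, populate the borrowed MpegEncContext with the current and
  * previous pictures, the frame geometry and the lambdas, so mpegvideo motion
  * estimation can run when encode_blocks() searches below. */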
1845  if(pic->pict_type == AV_PICTURE_TYPE_P){
1846  int block_width = (width +15)>>4;
1847  int block_height= (height+15)>>4;
1848  int stride= s->current_picture->linesize[0];
1849 
1850  av_assert0(s->current_picture->data[0]);
1851  av_assert0(s->last_picture[0]->data[0]);
1852 
1853  mpv->avctx = s->avctx;
1854  mpv->last_pic.ptr = &enc->last_pic;
1855  mpv->last_pic.ptr->f = s->last_picture[0];
1856  mpv-> new_pic = s->input_picture;
1857  mpv->linesize = stride;
1858  mpv->uvlinesize = s->current_picture->linesize[1];
1859  mpv->width = width;
1860  mpv->height = height;
1861  mpv->mb_width = block_width;
1862  mpv->mb_height = block_height;
1863  mpv->mb_stride = mpv->mb_width + 1;
1864  mpv->b8_stride = 2 * mpv->mb_width + 1;
1865  mpv->f_code = 1;
1866  mpv->pict_type = pic->pict_type;
1867  mpv->motion_est = enc->motion_est;
1868  mpv->me.scene_change_score = 0;
1869  mpv->me.dia_size = avctx->dia_size;
1870  mpv->quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1871  mpv->out_format = FMT_H263;
1872  mpv->unrestricted_mv = 1;
1873 
1874  mpv->lambda = enc->lambda;
1875  mpv->qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1876  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1877 
1878  mpv->qdsp = enc->qdsp; //move
1879  mpv->hdsp = s->hdsp;
1880  ff_me_init_pic(&enc->m);
1881  s->hdsp = mpv->hdsp;
1882  }
1883 
1884  if (enc->pass1_rc) {
1885  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1886  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1887  }
1888 
1889 redo_frame:
1890 
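 /* Start from 5 DWT levels and drop levels until even the subsampled chroma
  * planes keep at least one sample at the coarsest scale. */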
1891  s->spatial_decomposition_count= 5;
1892 
1893  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1894  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1895  s->spatial_decomposition_count--;
1896 
1897  if (s->spatial_decomposition_count <= 0) {
1898  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1899  return AVERROR(EINVAL);
1900  }
1901 
1902  mpv->pict_type = pic->pict_type;
1903  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1904 
1905  ff_snow_common_init_after_header(avctx);
1906 
1907  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1908  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1909  calculate_visual_weight(s, &s->plane[plane_index]);
1910  }
1911  }
1912 
1913  encode_header(s);
1914  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1915  encode_blocks(enc, 1);
1916  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1917 
1918  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1919  Plane *p= &s->plane[plane_index];
1920  int w= p->width;
1921  int h= p->height;
1922  int x, y;
1923 // int bits= put_bits_count(&s->c.pb);
1924 
1925  if (!enc->memc_only) {
1926  //FIXME optimize
1927  if(pict->data[plane_index]) //FIXME gray hack
1928  for(y=0; y<h; y++){
1929  for(x=0; x<w; x++){
1930  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1931  }
1932  }
1933  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1934 
1935  if( plane_index==0
1936  && pic->pict_type == AV_PICTURE_TYPE_P
1937  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1938  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1939  ff_init_range_encoder(c, pkt->data, pkt->size);
1940  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1941  pic->pict_type = AV_PICTURE_TYPE_I;
1942  s->keyframe=1;
1943  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1944  goto redo_frame;
1945  }
1946 
1947  if(s->qlog == LOSSLESS_QLOG){
1948  for(y=0; y<h; y++){
1949  for(x=0; x<w; x++){
1950  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1951  }
1952  }
1953  }else{
1954  for(y=0; y<h; y++){
1955  for(x=0; x<w; x++){
1956  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1957  }
1958  }
1959  }
1960 
1961  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1962 
1963  if (enc->pass1_rc && plane_index==0) {
1964  int delta_qlog = ratecontrol_1pass(enc, pic);
1965  if (delta_qlog <= INT_MIN)
1966  return -1;
1967  if(delta_qlog){
1968  //reordering qlog in the bitstream would eliminate this reset
1969  ff_init_range_encoder(c, pkt->data, pkt->size);
1970  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1971  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1972  encode_header(s);
1973  encode_blocks(enc, 0);
1974  }
1975  }
1976 
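 /* Code the plane: each subband is quantized, the LL band is DC-decorrelated,
  * the coefficients are entropy coded, then correlated and dequantized again in
  * place so the inverse DWT below reconstructs exactly what a decoder will see. */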
1977  for(level=0; level<s->spatial_decomposition_count; level++){
1978  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1979  SubBand *b= &p->band[level][orientation];
1980 
1981  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1982  if(orientation==0)
1983  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1984  if (!enc->no_bitstream)
1985  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1986  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1987  if(orientation==0)
1988  correlate(s, b, b->ibuf, b->stride, 1, 0);
1989  }
1990  }
1991 
1992  for(level=0; level<s->spatial_decomposition_count; level++){
1993  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1994  SubBand *b= &p->band[level][orientation];
1995 
1996  dequantize(s, b, b->ibuf, b->stride);
1997  }
1998  }
1999 
2000  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2001  if(s->qlog == LOSSLESS_QLOG){
2002  for(y=0; y<h; y++){
2003  for(x=0; x<w; x++){
2004  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2005  }
2006  }
2007  }
2008  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2009  }else{
2010  //ME/MC only
2011  if(pic->pict_type == AV_PICTURE_TYPE_I){
2012  for(y=0; y<h; y++){
2013  for(x=0; x<w; x++){
2014  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2015  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2016  }
2017  }
2018  }else{
2019  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2020  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2021  }
2022  }
2023  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2024  int64_t error= 0;
2025 
2026  if(pict->data[plane_index]) //FIXME gray hack
2027  for(y=0; y<h; y++){
2028  for(x=0; x<w; x++){
2029  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2030  error += d*d;
2031  }
2032  }
2033  s->avctx->error[plane_index] += error;
2034  enc->encoding_error[plane_index] = error;
2035  }
2036 
2037  }
2038  emms_c();
2039 
2040  update_last_header_values(s);
2041 
2042  ff_snow_release_buffer(avctx);
2043 
2044  s->current_picture->pict_type = pic->pict_type;
2045  s->current_picture->quality = pic->quality;
2046  mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2047  mpv->p_tex_bits = mpv->frame_bits - mpv->misc_bits - mpv->mv_bits;
2048  mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2049  enc->cur_pic.display_picture_number =
2050  enc->cur_pic.coded_picture_number = avctx->frame_num;
2051  enc->cur_pic.f->quality = pic->quality;
2052  if (enc->pass1_rc)
2053  if (ff_rate_estimate_qscale(mpv, 0) < 0)
2054  return -1;
2055  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2056  ff_write_pass1_stats(mpv);
2057  mpv->last_pict_type = mpv->pict_type;
2058 
2059  emms_c();
2060 
2061  ff_side_data_set_encoder_stats(pkt, s->current_picture->quality,
2062  enc->encoding_error,
2063  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2064  s->current_picture->pict_type);
2065  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2066  av_frame_replace(avci->recon_frame, s->current_picture);
2067  }
2068 
2069  pkt->size = ff_rac_terminate(c, 0);
2070  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2071  pkt->flags |= AV_PKT_FLAG_KEY;
2072  *got_packet = 1;
2073 
2074  return 0;
2075 }
2076 
2077 static av_cold int encode_end(AVCodecContext *avctx)
2078 {
2079  SnowEncContext *const enc = avctx->priv_data;
2080  SnowContext *const s = &enc->com;
2081 
2082  ff_snow_common_end(s);
2083  ff_rate_control_uninit(&enc->m.rc_context);
2084  av_frame_free(&s->input_picture);
2085 
2086  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2087  av_freep(&s->ref_mvs[i]);
2088  av_freep(&s->ref_scores[i]);
2089  }
2090 
2091  enc->m.me.temp = NULL;
2092  av_freep(&enc->m.me.scratchpad);
2093  av_freep(&enc->m.me.map);
2094  av_freep(&enc->m.sc.obmc_scratchpad);
2095 
2096  av_freep(&avctx->stats_out);
2097 
2098  return 0;
2099 }
2100 
2101 #define OFFSET(x) offsetof(SnowEncContext, x)
2102 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2103 static const AVOption options[] = {
2104  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2105  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2106  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2107  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
2108  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
2109  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2110  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2111  { "intra_penalty", "Penalty for intra blocks in block decission", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2112  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2113  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2114  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2115  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2116  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2117  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2118  "defined in the section 'Expression Evaluation', the following functions are available: "
2119  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2120  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2121  OFFSET(m.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
2122  { NULL },
2123 };
2124 
2125 static const AVClass snowenc_class = {
2126  .class_name = "snow encoder",
2127  .item_name = av_default_item_name,
2128  .option = options,
2129  .version = LIBAVUTIL_VERSION_INT,
2130 };
2131 
2133  .p.name = "snow",
2134  CODEC_LONG_NAME("Snow"),
2135  .p.type = AVMEDIA_TYPE_VIDEO,
2136  .p.id = AV_CODEC_ID_SNOW,
2137  .p.capabilities = AV_CODEC_CAP_DR1 |
2138  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE |
2139  AV_CODEC_CAP_ENCODER_RECON_FRAME,
2140  .priv_data_size = sizeof(SnowEncContext),
2141  .init = encode_init,
2142  FF_CODEC_ENCODE_CB(encode_frame),
2143  .close = encode_end,
2144  .p.pix_fmts = (const enum AVPixelFormat[]){
2145  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
2146  AV_PIX_FMT_GRAY8,
2147  AV_PIX_FMT_NONE
2148  },
2149  .color_ranges = AVCOL_RANGE_MPEG,
2150  .p.priv_class = &snowenc_class,
2151  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2152 };
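Usage note: the private options declared in options[] are exposed through the
standard libavcodec option mechanism, so a Snow encode from the command line
could look roughly like
    ffmpeg -i input.y4m -c:v snow -pred dwt53 -motion_est iter output.nut
(container and option values here are only illustrative).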