FFmpeg
snow.c
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/log.h"
22 #include "libavutil/thread.h"
23 #include "avcodec.h"
24 #include "decode.h"
25 #include "encode.h"
26 #include "snow_dwt.h"
27 #include "snow.h"
28 #include "snowdata.h"
29 
30 
31 void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
32  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
33  int y, x;
34  IDWTELEM * dst;
35  for(y=0; y<b_h; y++){
36  //FIXME ugly misuse of obmc_stride
37  const uint8_t *obmc1= obmc + y*obmc_stride;
38  const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
39  const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
40  const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
41  dst = slice_buffer_get_line(sb, src_y + y);
42  for(x=0; x<b_w; x++){
43  int v= obmc1[x] * block[3][x + y*src_stride]
44  +obmc2[x] * block[2][x + y*src_stride]
45  +obmc3[x] * block[1][x + y*src_stride]
46  +obmc4[x] * block[0][x + y*src_stride];
47 
48  v <<= 8 - LOG2_OBMC_MAX;
49  if(FRAC_BITS != 8){
50  v >>= 8 - FRAC_BITS;
51  }
52  if(add){
53  v += dst[x + src_x];
54  v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
55  if(v&(~255)) v= ~(v>>31);
56  dst8[x + y*src_stride] = v;
57  }else{
58  dst[x + src_x] -= v;
59  }
60  }
61  }
62 }
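Aside: the idiom "if(v&(~255)) v= ~(v>>31)" above is a branchless clamp to 0..255. A minimal standalone sketch (not part of snow.c, assuming the two's-complement arithmetic right shift that FFmpeg relies on) shows how it behaves once the result lands in an 8-bit destination, as it does in dst8[] above.

#include <assert.h>
#include <stdint.h>

static uint8_t clip_uint8_branchless(int v)
{
    if (v & ~255)        /* some bit outside 0..255 is set: v is out of range        */
        v = ~(v >> 31);  /* negative v -> ~(-1) = 0; v > 255 -> ~0, i.e. all bits set */
    return v;            /* the narrowing store turns the all-ones case into 255     */
}

int main(void)
{
    assert(clip_uint8_branchless( -7) ==   0);
    assert(clip_uint8_branchless( 42) ==  42);
    assert(clip_uint8_branchless(300) == 255);
    return 0;
}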
63 
64 int ff_snow_get_buffer(SnowContext *s, AVFrame *frame)
65 {
66  int ret, i;
67  int edges_needed = av_codec_is_encoder(s->avctx->codec);
68 
69  frame->width = s->avctx->width ;
70  frame->height = s->avctx->height;
71  if (edges_needed) {
72  frame->width += 2 * EDGE_WIDTH;
73  frame->height += 2 * EDGE_WIDTH;
74 
75  ret = ff_encode_alloc_frame(s->avctx, frame);
76  } else
77  ret = ff_get_buffer(s->avctx, frame, AV_GET_BUFFER_FLAG_REF);
78  if (ret < 0)
79  return ret;
80  if (edges_needed) {
81  for (i = 0; frame->data[i]; i++) {
82  int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
83  frame->linesize[i] +
84  (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
85  frame->data[i] += offset;
86  }
87  frame->width = s->avctx->width;
88  frame->height = s->avctx->height;
89  }
90 
91  return 0;
92 }
93 
94 void ff_snow_reset_contexts(SnowContext *s){ //FIXME better initial contexts
95  int plane_index, level, orientation;
96 
97  for(plane_index=0; plane_index<3; plane_index++){
98  for(level=0; level<MAX_DECOMPOSITIONS; level++){
99  for(orientation=level ? 1:0; orientation<4; orientation++){
100  memset(s->plane[plane_index].band[level][orientation].state, MID_STATE, sizeof(s->plane[plane_index].band[level][orientation].state));
101  }
102  }
103  }
104  memset(s->header_state, MID_STATE, sizeof(s->header_state));
105  memset(s->block_state, MID_STATE, sizeof(s->block_state));
106 }
107 
108 int ff_snow_alloc_blocks(SnowContext *s){
109  int w= AV_CEIL_RSHIFT(s->avctx->width, LOG2_MB_SIZE);
110  int h= AV_CEIL_RSHIFT(s->avctx->height, LOG2_MB_SIZE);
111 
112  s->b_width = w;
113  s->b_height= h;
114 
115  av_free(s->block);
116  s->block = av_calloc(w * h, sizeof(*s->block) << (s->block_max_depth*2));
117  if (!s->block)
118  return AVERROR(ENOMEM);
119 
120  return 0;
121 }
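Aside: a standalone sketch (not part of snow.c) of what the av_calloc above reserves: w*h macroblocks times 4^block_max_depth BlockNode entries each. The 720x544 frame size and 16x16 macroblock size used for the numbers are assumptions for illustration only.

#include <stdio.h>

int main(void)
{
    int w = 45, h = 34;   /* 720 and 544 right-shifted by an assumed LOG2_MB_SIZE of 4 */
    for (int depth = 0; depth <= 1; depth++)   /* small example depths */
        printf("block_max_depth %d: %d BlockNode entries\n",
               depth, (w * h) << (2 * depth));
    return 0;
}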
122 
123 static void mc_block(Plane *p, uint8_t *dst, const uint8_t *src, int stride, int b_w, int b_h, int dx, int dy){
124  static const uint8_t weight[64]={
125  8,7,6,5,4,3,2,1,
126  7,7,0,0,0,0,0,1,
127  6,0,6,0,0,0,2,0,
128  5,0,0,5,0,3,0,0,
129  4,0,0,0,4,0,0,0,
130  3,0,0,5,0,3,0,0,
131  2,0,6,0,0,0,2,0,
132  1,7,0,0,0,0,0,1,
133  };
134 
135  static const uint8_t brane[256]={
136  0x00,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x11,0x12,0x12,0x12,0x12,0x12,0x12,0x12,
137  0x04,0x05,0xcc,0xcc,0xcc,0xcc,0xcc,0x41,0x15,0x16,0xcc,0xcc,0xcc,0xcc,0xcc,0x52,
138  0x04,0xcc,0x05,0xcc,0xcc,0xcc,0x41,0xcc,0x15,0xcc,0x16,0xcc,0xcc,0xcc,0x52,0xcc,
139  0x04,0xcc,0xcc,0x05,0xcc,0x41,0xcc,0xcc,0x15,0xcc,0xcc,0x16,0xcc,0x52,0xcc,0xcc,
140  0x04,0xcc,0xcc,0xcc,0x41,0xcc,0xcc,0xcc,0x15,0xcc,0xcc,0xcc,0x16,0xcc,0xcc,0xcc,
141  0x04,0xcc,0xcc,0x41,0xcc,0x05,0xcc,0xcc,0x15,0xcc,0xcc,0x52,0xcc,0x16,0xcc,0xcc,
142  0x04,0xcc,0x41,0xcc,0xcc,0xcc,0x05,0xcc,0x15,0xcc,0x52,0xcc,0xcc,0xcc,0x16,0xcc,
143  0x04,0x41,0xcc,0xcc,0xcc,0xcc,0xcc,0x05,0x15,0x52,0xcc,0xcc,0xcc,0xcc,0xcc,0x16,
144  0x44,0x45,0x45,0x45,0x45,0x45,0x45,0x45,0x55,0x56,0x56,0x56,0x56,0x56,0x56,0x56,
145  0x48,0x49,0xcc,0xcc,0xcc,0xcc,0xcc,0x85,0x59,0x5A,0xcc,0xcc,0xcc,0xcc,0xcc,0x96,
146  0x48,0xcc,0x49,0xcc,0xcc,0xcc,0x85,0xcc,0x59,0xcc,0x5A,0xcc,0xcc,0xcc,0x96,0xcc,
147  0x48,0xcc,0xcc,0x49,0xcc,0x85,0xcc,0xcc,0x59,0xcc,0xcc,0x5A,0xcc,0x96,0xcc,0xcc,
148  0x48,0xcc,0xcc,0xcc,0x49,0xcc,0xcc,0xcc,0x59,0xcc,0xcc,0xcc,0x96,0xcc,0xcc,0xcc,
149  0x48,0xcc,0xcc,0x85,0xcc,0x49,0xcc,0xcc,0x59,0xcc,0xcc,0x96,0xcc,0x5A,0xcc,0xcc,
150  0x48,0xcc,0x85,0xcc,0xcc,0xcc,0x49,0xcc,0x59,0xcc,0x96,0xcc,0xcc,0xcc,0x5A,0xcc,
151  0x48,0x85,0xcc,0xcc,0xcc,0xcc,0xcc,0x49,0x59,0x96,0xcc,0xcc,0xcc,0xcc,0xcc,0x5A,
152  };
153 
154  static const uint8_t needs[16]={
155  0,1,0,0,
156  2,4,2,0,
157  0,1,0,0,
158  15
159  };
160 
161  int x, y, b, r, l;
162  int16_t tmpIt [64*(32+HTAPS_MAX)];
163  uint8_t tmp2t[3][64*(32+HTAPS_MAX)];
164  int16_t *tmpI= tmpIt;
165  uint8_t *tmp2= tmp2t[0];
166  const uint8_t *hpel[11];
167  av_assert2(dx<16 && dy<16);
168  r= brane[dx + 16*dy]&15;
169  l= brane[dx + 16*dy]>>4;
170 
171  b= needs[l] | needs[r];
172  if(p && !p->diag_mc)
173  b= 15;
174 
175  if(b&5){
176  for(y=0; y < b_h+HTAPS_MAX-1; y++){
177  for(x=0; x < b_w; x++){
178  int a_1=src[x + HTAPS_MAX/2-4];
179  int a0= src[x + HTAPS_MAX/2-3];
180  int a1= src[x + HTAPS_MAX/2-2];
181  int a2= src[x + HTAPS_MAX/2-1];
182  int a3= src[x + HTAPS_MAX/2+0];
183  int a4= src[x + HTAPS_MAX/2+1];
184  int a5= src[x + HTAPS_MAX/2+2];
185  int a6= src[x + HTAPS_MAX/2+3];
186  int am=0;
187  if(!p || p->fast_mc){
188  am= 20*(a2+a3) - 5*(a1+a4) + (a0+a5);
189  tmpI[x]= am;
190  am= (am+16)>>5;
191  }else{
192  am= p->hcoeff[0]*(a2+a3) + p->hcoeff[1]*(a1+a4) + p->hcoeff[2]*(a0+a5) + p->hcoeff[3]*(a_1+a6);
193  tmpI[x]= am;
194  am= (am+32)>>6;
195  }
196 
197  if(am&(~255)) am= ~(am>>31);
198  tmp2[x]= am;
199  }
200  tmpI+= 64;
201  tmp2+= 64;
202  src += stride;
203  }
204  src -= stride*y;
205  }
206  src += HTAPS_MAX/2 - 1;
207  tmp2= tmp2t[1];
208 
209  if(b&2){
210  for(y=0; y < b_h; y++){
211  for(x=0; x < b_w+1; x++){
212  int a_1=src[x + (HTAPS_MAX/2-4)*stride];
213  int a0= src[x + (HTAPS_MAX/2-3)*stride];
214  int a1= src[x + (HTAPS_MAX/2-2)*stride];
215  int a2= src[x + (HTAPS_MAX/2-1)*stride];
216  int a3= src[x + (HTAPS_MAX/2+0)*stride];
217  int a4= src[x + (HTAPS_MAX/2+1)*stride];
218  int a5= src[x + (HTAPS_MAX/2+2)*stride];
219  int a6= src[x + (HTAPS_MAX/2+3)*stride];
220  int am=0;
221  if(!p || p->fast_mc)
222  am= (20*(a2+a3) - 5*(a1+a4) + (a0+a5) + 16)>>5;
223  else
224  am= (p->hcoeff[0]*(a2+a3) + p->hcoeff[1]*(a1+a4) + p->hcoeff[2]*(a0+a5) + p->hcoeff[3]*(a_1+a6) + 32)>>6;
225 
226  if(am&(~255)) am= ~(am>>31);
227  tmp2[x]= am;
228  }
229  src += stride;
230  tmp2+= 64;
231  }
232  src -= stride*y;
233  }
234  src += stride*(HTAPS_MAX/2 - 1);
235  tmp2= tmp2t[2];
236  tmpI= tmpIt;
237  if(b&4){
238  for(y=0; y < b_h; y++){
239  for(x=0; x < b_w; x++){
240  int a_1=tmpI[x + (HTAPS_MAX/2-4)*64];
241  int a0= tmpI[x + (HTAPS_MAX/2-3)*64];
242  int a1= tmpI[x + (HTAPS_MAX/2-2)*64];
243  int a2= tmpI[x + (HTAPS_MAX/2-1)*64];
244  int a3= tmpI[x + (HTAPS_MAX/2+0)*64];
245  int a4= tmpI[x + (HTAPS_MAX/2+1)*64];
246  int a5= tmpI[x + (HTAPS_MAX/2+2)*64];
247  int a6= tmpI[x + (HTAPS_MAX/2+3)*64];
248  int am=0;
249  if(!p || p->fast_mc)
250  am= (20*(a2+a3) - 5*(a1+a4) + (a0+a5) + 512)>>10;
251  else
252  am= (p->hcoeff[0]*(a2+a3) + p->hcoeff[1]*(a1+a4) + p->hcoeff[2]*(a0+a5) + p->hcoeff[3]*(a_1+a6) + 2048)>>12;
253  if(am&(~255)) am= ~(am>>31);
254  tmp2[x]= am;
255  }
256  tmpI+= 64;
257  tmp2+= 64;
258  }
259  }
260 
261  hpel[ 0]= src;
262  hpel[ 1]= tmp2t[0] + 64*(HTAPS_MAX/2-1);
263  hpel[ 2]= src + 1;
264 
265  hpel[ 4]= tmp2t[1];
266  hpel[ 5]= tmp2t[2];
267  hpel[ 6]= tmp2t[1] + 1;
268 
269  hpel[ 8]= src + stride;
270  hpel[ 9]= hpel[1] + 64;
271  hpel[10]= hpel[8] + 1;
272 
273 #define MC_STRIDE(x) (needs[x] ? 64 : stride)
274 
275  if(b==15){
276  int dxy = dx / 8 + dy / 8 * 4;
277  const uint8_t *src1 = hpel[dxy ];
278  const uint8_t *src2 = hpel[dxy + 1];
279  const uint8_t *src3 = hpel[dxy + 4];
280  const uint8_t *src4 = hpel[dxy + 5];
281  int stride1 = MC_STRIDE(dxy);
282  int stride2 = MC_STRIDE(dxy + 1);
283  int stride3 = MC_STRIDE(dxy + 4);
284  int stride4 = MC_STRIDE(dxy + 5);
285  dx&=7;
286  dy&=7;
287  for(y=0; y < b_h; y++){
288  for(x=0; x < b_w; x++){
289  dst[x]= ((8-dx)*(8-dy)*src1[x] + dx*(8-dy)*src2[x]+
290  (8-dx)* dy *src3[x] + dx* dy *src4[x]+32)>>6;
291  }
292  src1+=stride1;
293  src2+=stride2;
294  src3+=stride3;
295  src4+=stride4;
296  dst +=stride;
297  }
298  }else{
299  const uint8_t *src1= hpel[l];
300  const uint8_t *src2= hpel[r];
301  int stride1 = MC_STRIDE(l);
302  int stride2 = MC_STRIDE(r);
303  int a= weight[((dx&7) + (8*(dy&7)))];
304  int b= 8-a;
305  for(y=0; y < b_h; y++){
306  for(x=0; x < b_w; x++){
307  dst[x]= (a*src1[x] + b*src2[x] + 4)>>3;
308  }
309  src1+=stride1;
310  src2+=stride2;
311  dst +=stride;
312  }
313  }
314 }
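Aside: the fast_mc branches above use a 6-tap (1,-5,20,20,-5,1)/32 half-pel kernel. The standalone sketch below (not part of snow.c) computes a single horizontally interpolated sample with the same arithmetic, including the rounding and the branchless clamp.

#include <stdint.h>
#include <stdio.h>

/* src points at the first of the six taps; the result lies between src[2] and src[3] */
static uint8_t hpel_sample_6tap(const uint8_t *src)
{
    int am = 20 * (src[2] + src[3]) - 5 * (src[1] + src[4]) + (src[0] + src[5]);
    am = (am + 16) >> 5;              /* round, then drop the 5 fractional bits */
    if (am & ~255)
        am = ~(am >> 31);             /* same clamp idiom as in mc_block()      */
    return am;
}

int main(void)
{
    const uint8_t row[6] = { 10, 20, 30, 40, 50, 60 };
    printf("half-pel between 30 and 40: %d\n", hpel_sample_6tap(row)); /* prints 35 */
    return 0;
}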
315 
316 void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride, int sx, int sy, int b_w, int b_h, const BlockNode *block, int plane_index, int w, int h){
317  if(block->type & BLOCK_INTRA){
318  int x, y;
319  const unsigned color = block->color[plane_index];
320  const unsigned color4 = color*0x01010101;
321  if(b_w==32){
322  for(y=0; y < b_h; y++){
323  *(uint32_t*)&dst[0 + y*stride]= color4;
324  *(uint32_t*)&dst[4 + y*stride]= color4;
325  *(uint32_t*)&dst[8 + y*stride]= color4;
326  *(uint32_t*)&dst[12+ y*stride]= color4;
327  *(uint32_t*)&dst[16+ y*stride]= color4;
328  *(uint32_t*)&dst[20+ y*stride]= color4;
329  *(uint32_t*)&dst[24+ y*stride]= color4;
330  *(uint32_t*)&dst[28+ y*stride]= color4;
331  }
332  }else if(b_w==16){
333  for(y=0; y < b_h; y++){
334  *(uint32_t*)&dst[0 + y*stride]= color4;
335  *(uint32_t*)&dst[4 + y*stride]= color4;
336  *(uint32_t*)&dst[8 + y*stride]= color4;
337  *(uint32_t*)&dst[12+ y*stride]= color4;
338  }
339  }else if(b_w==8){
340  for(y=0; y < b_h; y++){
341  *(uint32_t*)&dst[0 + y*stride]= color4;
342  *(uint32_t*)&dst[4 + y*stride]= color4;
343  }
344  }else if(b_w==4){
345  for(y=0; y < b_h; y++){
346  *(uint32_t*)&dst[0 + y*stride]= color4;
347  }
348  }else{
349  for(y=0; y < b_h; y++){
350  for(x=0; x < b_w; x++){
351  dst[x + y*stride]= color;
352  }
353  }
354  }
355  }else{
356  const uint8_t *src = s->last_picture[block->ref]->data[plane_index];
357  const int scale= plane_index ? (2*s->mv_scale)>>s->chroma_h_shift : 2*s->mv_scale;
358  int mx= block->mx*scale;
359  int my= block->my*scale;
360  const int dx= mx&15;
361  const int dy= my&15;
362  const int tab_index= 3 - (b_w>>2) + (b_w>>4);
363  sx += (mx>>4) - (HTAPS_MAX/2-1);
364  sy += (my>>4) - (HTAPS_MAX/2-1);
365  src += sx + sy*stride;
366  if( (unsigned)sx >= FFMAX(w - b_w - (HTAPS_MAX-2), 0)
367  || (unsigned)sy >= FFMAX(h - b_h - (HTAPS_MAX-2), 0)){
368  s->vdsp.emulated_edge_mc(tmp + MB_SIZE, src,
369  stride, stride,
370  b_w+HTAPS_MAX-1, b_h+HTAPS_MAX-1,
371  sx, sy, w, h);
372  src= tmp + MB_SIZE;
373  }
374 
375  av_assert2(s->chroma_h_shift == s->chroma_v_shift); // only one mv_scale
376 
377  av_assert2((tab_index>=0 && tab_index<4) || b_w==32);
378  if( (dx&3) || (dy&3)
379  || !(b_w == b_h || 2*b_w == b_h || b_w == 2*b_h)
380  || (b_w&(b_w-1))
381  || b_w == 1
382  || b_h == 1
383  || !s->plane[plane_index].fast_mc )
384  mc_block(&s->plane[plane_index], dst, src, stride, b_w, b_h, dx, dy);
385  else if(b_w==32){
386  int y;
387  for(y=0; y<b_h; y+=16){
388  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+(dx>>2)](dst + y*stride, src + 3 + (y+3)*stride,stride);
389  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+(dx>>2)](dst + 16 + y*stride, src + 19 + (y+3)*stride,stride);
390  }
391  }else if(b_w==b_h)
392  s->h264qpel.put_h264_qpel_pixels_tab[tab_index ][dy+(dx>>2)](dst,src + 3 + 3*stride,stride);
393  else if(b_w==2*b_h){
394  s->h264qpel.put_h264_qpel_pixels_tab[tab_index+1][dy+(dx>>2)](dst ,src + 3 + 3*stride,stride);
395  s->h264qpel.put_h264_qpel_pixels_tab[tab_index+1][dy+(dx>>2)](dst+b_h,src + 3 + b_h + 3*stride,stride);
396  }else{
397  av_assert2(2*b_w==b_h);
398  s->h264qpel.put_h264_qpel_pixels_tab[tab_index ][dy+(dx>>2)](dst ,src + 3 + 3*stride ,stride);
399  s->h264qpel.put_h264_qpel_pixels_tab[tab_index ][dy+(dx>>2)](dst+b_w*stride,src + 3 + 3*stride+b_w*stride,stride);
400  }
401  }
402 }
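Aside: the expression tab_index = 3 - (b_w>>2) + (b_w>>4) above selects the h264 qpel size class from the block width (b_w == 32 is special-cased separately). The standalone loop below (not part of snow.c) merely tabulates that mapping for the power-of-two widths the assert admits.

#include <stdio.h>

int main(void)
{
    for (int b_w = 2; b_w <= 16; b_w <<= 1) {
        int tab_index = 3 - (b_w >> 2) + (b_w >> 4);
        printf("b_w=%2d -> put_h264_qpel_pixels_tab[%d]\n", b_w, tab_index);
    }
    return 0;
}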
403 
404 #define mca(dx,dy,b_w)\
405 static void mc_block_hpel ## dx ## dy ## b_w(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h){\
406  av_assert2(h==b_w);\
407  mc_block(NULL, dst, src-(HTAPS_MAX/2-1)-(HTAPS_MAX/2-1)*stride, stride, b_w, b_w, dx, dy);\
408 }
409 
410 mca( 0, 0,16)
411 mca( 8, 0,16)
412 mca( 0, 8,16)
413 mca( 8, 8,16)
414 mca( 0, 0,8)
415 mca( 8, 0,8)
416 mca( 0, 8,8)
417 mca( 8, 8,8)
418 
419 static av_cold void snow_static_init(void)
420 {
421  for (int i = 0; i < MAX_REF_FRAMES; i++)
422  for (int j = 0; j < MAX_REF_FRAMES; j++)
423  ff_scale_mv_ref[i][j] = 256 * (i + 1) / (j + 1);
424 }
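Aside: ff_scale_mv_ref[i][j] = 256*(i+1)/(j+1) is an 8-bit fixed-point ratio of reference distances. The helper below is a hypothetical illustration (not the codec's actual prediction code) of how such a factor rescales one motion-vector component; the +128 rounding is an assumption of the sketch.

#include <stdio.h>

/* hypothetical helper, not an FFmpeg API */
static int scale_mv_component(int mv, int to_dist, int from_dist)
{
    int factor = 256 * to_dist / from_dist;   /* what snow_static_init() tabulates */
    return (mv * factor + 128) >> 8;          /* rounded fixed-point multiply      */
}

int main(void)
{
    /* an mv of +12 measured over 2 frames, rescaled to a 3-frame distance */
    printf("%d\n", scale_mv_component(12, 3, 2)); /* prints 18 */
    return 0;
}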
425 
426 av_cold int ff_snow_common_init(AVCodecContext *avctx){
427  static AVOnce init_static_once = AV_ONCE_INIT;
428  SnowContext *s = avctx->priv_data;
429  int width, height;
430  int i;
431 
432  s->avctx= avctx;
433  s->max_ref_frames=1; //just make sure it's not an invalid value in case of no initial keyframe
434  s->spatial_decomposition_count = 1;
435 
436  ff_hpeldsp_init(&s->hdsp, avctx->flags);
437  ff_videodsp_init(&s->vdsp, 8);
438  ff_dwt_init(&s->dwt);
439  ff_h264qpel_init(&s->h264qpel, 8);
440 
441 #define mcf(dx,dy)\
442  s->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
443  s->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
444  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
445  s->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
446  s->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
447  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
448 
449  mcf( 0, 0)
450  mcf( 4, 0)
451  mcf( 8, 0)
452  mcf(12, 0)
453  mcf( 0, 4)
454  mcf( 4, 4)
455  mcf( 8, 4)
456  mcf(12, 4)
457  mcf( 0, 8)
458  mcf( 4, 8)
459  mcf( 8, 8)
460  mcf(12, 8)
461  mcf( 0,12)
462  mcf( 4,12)
463  mcf( 8,12)
464  mcf(12,12)
465 
466 #define mcfh(dx,dy)\
467  s->hdsp.put_pixels_tab [0][dy/4+dx/8]=\
468  s->hdsp.put_no_rnd_pixels_tab[0][dy/4+dx/8]=\
469  mc_block_hpel ## dx ## dy ## 16;\
470  s->hdsp.put_pixels_tab [1][dy/4+dx/8]=\
471  s->hdsp.put_no_rnd_pixels_tab[1][dy/4+dx/8]=\
472  mc_block_hpel ## dx ## dy ## 8;
473 
474  mcfh(0, 0)
475  mcfh(8, 0)
476  mcfh(0, 8)
477  mcfh(8, 8)
478 
479 // dec += FFMAX(s->chroma_h_shift, s->chroma_v_shift);
480 
481  width= s->avctx->width;
482  height= s->avctx->height;
483 
484  if (!FF_ALLOCZ_TYPED_ARRAY(s->spatial_idwt_buffer, width * height) ||
485  !FF_ALLOCZ_TYPED_ARRAY(s->spatial_dwt_buffer, width * height) || //FIXME this does not belong here
486  !FF_ALLOCZ_TYPED_ARRAY(s->temp_dwt_buffer, width) ||
487  !FF_ALLOCZ_TYPED_ARRAY(s->temp_idwt_buffer, width) ||
488  !FF_ALLOCZ_TYPED_ARRAY(s->run_buffer, ((width + 1) >> 1) * ((height + 1) >> 1)))
489  return AVERROR(ENOMEM);
490 
491  for(i=0; i<MAX_REF_FRAMES; i++) {
492  s->last_picture[i] = av_frame_alloc();
493  if (!s->last_picture[i])
494  return AVERROR(ENOMEM);
495  }
496 
497  s->mconly_picture = av_frame_alloc();
498  s->current_picture = av_frame_alloc();
499  if (!s->mconly_picture || !s->current_picture)
500  return AVERROR(ENOMEM);
501 
502  ff_thread_once(&init_static_once, snow_static_init);
503 
504  return 0;
505 }
506 
507 int ff_snow_common_init_after_header(AVCodecContext *avctx) {
508  SnowContext *s = avctx->priv_data;
509  int plane_index, level, orientation;
510  int ret, emu_buf_size;
511 
512  if(!s->scratchbuf) {
513  if (av_codec_is_decoder(avctx->codec)) {
514  if ((ret = ff_get_buffer(s->avctx, s->mconly_picture,
515  AV_GET_BUFFER_FLAG_REF)) < 0)
516  return ret;
517  }
518 
519  emu_buf_size = FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256) * (2 * MB_SIZE + HTAPS_MAX - 1);
520  if (!FF_ALLOCZ_TYPED_ARRAY(s->scratchbuf, FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256) * 7 * MB_SIZE) ||
521  !FF_ALLOCZ_TYPED_ARRAY(s->emu_edge_buffer, emu_buf_size))
522  return AVERROR(ENOMEM);
523  }
524 
525  if (av_codec_is_decoder(avctx->codec) &&
526  s->mconly_picture->format != avctx->pix_fmt) {
527  av_log(avctx, AV_LOG_ERROR, "pixel format changed\n");
528  return AVERROR_INVALIDDATA;
529  }
530 
531  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
532  int w= s->avctx->width;
533  int h= s->avctx->height;
534 
535  if(plane_index){
536  w = AV_CEIL_RSHIFT(w, s->chroma_h_shift);
537  h = AV_CEIL_RSHIFT(h, s->chroma_v_shift);
538  }
539  s->plane[plane_index].width = w;
540  s->plane[plane_index].height= h;
541 
542  for(level=s->spatial_decomposition_count-1; level>=0; level--){
543  for(orientation=level ? 1 : 0; orientation<4; orientation++){
544  SubBand *b= &s->plane[plane_index].band[level][orientation];
545 
546  b->buf= s->spatial_dwt_buffer;
547  b->level= level;
548  b->stride= s->plane[plane_index].width << (s->spatial_decomposition_count - level);
549  b->width = (w + !(orientation&1))>>1;
550  b->height= (h + !(orientation>1))>>1;
551 
552  b->stride_line = 1 << (s->spatial_decomposition_count - level);
553  b->buf_x_offset = 0;
554  b->buf_y_offset = 0;
555 
556  if(orientation&1){
557  b->buf += (w+1)>>1;
558  b->buf_x_offset = (w+1)>>1;
559  }
560  if(orientation>1){
561  b->buf += b->stride>>1;
562  b->buf_y_offset = b->stride_line >> 1;
563  }
564  b->ibuf= s->spatial_idwt_buffer + (b->buf - s->spatial_dwt_buffer);
565 
566  if(level)
567  b->parent= &s->plane[plane_index].band[level-1][orientation];
568  //FIXME avoid this realloc
569  av_freep(&b->x_coeff);
570  b->x_coeff = av_calloc((b->width + 1) * b->height + 1,
571  sizeof(*b->x_coeff));
572  if (!b->x_coeff)
573  return AVERROR(ENOMEM);
574  }
575  w= (w+1)>>1;
576  h= (h+1)>>1;
577  }
578  }
579 
580  return 0;
581 }
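Aside: the band setup above halves w and h once per decomposition level, and only the coarsest level (level 0, processed last) keeps orientation 0, the LL band. The standalone loop below (not part of snow.c) prints the resulting band dimensions for an assumed 352x288 plane and 4 decomposition levels.

#include <stdio.h>

int main(void)
{
    int w = 352, h = 288, count = 4;   /* assumed plane size and decomposition count */
    for (int level = count - 1; level >= 0; level--) {
        for (int orientation = level ? 1 : 0; orientation < 4; orientation++)
            printf("level %d, orientation %d: %dx%d\n", level, orientation,
                   (w + !(orientation & 1)) >> 1, (h + !(orientation > 1)) >> 1);
        w = (w + 1) >> 1;   /* same halving as at the end of the plane loop above */
        h = (h + 1) >> 1;
    }
    return 0;
}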
582 
583 void ff_snow_release_buffer(AVCodecContext *avctx)
584 {
585  SnowContext *s = avctx->priv_data;
586 
587  if(s->last_picture[s->max_ref_frames-1]->data[0]){
588  av_frame_unref(s->last_picture[s->max_ref_frames-1]);
589  }
590 }
591 
592 int ff_snow_frame_start(SnowContext *s){
593  AVFrame *tmp;
594  int i, ret;
595 
596  ff_snow_release_buffer(s->avctx);
597 
598  tmp= s->last_picture[s->max_ref_frames-1];
599  for(i=s->max_ref_frames-1; i>0; i--)
600  s->last_picture[i] = s->last_picture[i-1];
601  s->last_picture[0] = s->current_picture;
602  s->current_picture = tmp;
603 
604  if(s->keyframe){
605  s->ref_frames= 0;
606  }else{
607  int i;
608  for(i=0; i<s->max_ref_frames && s->last_picture[i]->data[0]; i++)
609  if(i && s->last_picture[i-1]->key_frame)
610  break;
611  s->ref_frames= i;
612  if(s->ref_frames==0){
613  av_log(s->avctx,AV_LOG_ERROR, "No reference frames\n");
614  return AVERROR_INVALIDDATA;
615  }
616  }
617  if ((ret = ff_snow_get_buffer(s, s->current_picture)) < 0)
618  return ret;
619 
620  s->current_picture->key_frame= s->keyframe;
621 
622  return 0;
623 }
624 
625 av_cold void ff_snow_common_end(SnowContext *s)
626 {
627  int plane_index, level, orientation, i;
628 
629  av_freep(&s->spatial_dwt_buffer);
630  av_freep(&s->temp_dwt_buffer);
631  av_freep(&s->spatial_idwt_buffer);
632  av_freep(&s->temp_idwt_buffer);
633  av_freep(&s->run_buffer);
634 
635  s->m.me.temp= NULL;
636  av_freep(&s->m.me.scratchpad);
637  av_freep(&s->m.me.map);
638  av_freep(&s->m.sc.obmc_scratchpad);
639 
640  av_freep(&s->block);
641  av_freep(&s->scratchbuf);
642  av_freep(&s->emu_edge_buffer);
643 
644  for(i=0; i<MAX_REF_FRAMES; i++){
645  av_freep(&s->ref_mvs[i]);
646  av_freep(&s->ref_scores[i]);
647  if(s->last_picture[i] && s->last_picture[i]->data[0]) {
648  av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
649  }
650  av_frame_free(&s->last_picture[i]);
651  }
652 
653  for(plane_index=0; plane_index < MAX_PLANES; plane_index++){
654  for(level=MAX_DECOMPOSITIONS-1; level>=0; level--){
655  for(orientation=level ? 1 : 0; orientation<4; orientation++){
656  SubBand *b= &s->plane[plane_index].band[level][orientation];
657 
658  av_freep(&b->x_coeff);
659  }
660  }
661  }
662  av_frame_free(&s->mconly_picture);
663  av_frame_free(&s->current_picture);
664 }