FFmpeg
cfhd.c
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 enum CFHDParam {
44  ChannelCount = 12,
45  SubbandCount = 14,
46  ImageWidth = 20,
47  ImageHeight = 21,
48  LowpassPrecision = 35,
49  SubbandNumber = 48,
50  Quantization = 53,
51  ChannelNumber = 62,
52  SampleFlags = 68,
53  BitsPerComponent = 101,
54  ChannelWidth = 104,
55  ChannelHeight = 105,
56  PrescaleShift = 109,
57 };
58 
59 
60 
61 static av_cold int cfhd_init(AVCodecContext *avctx)
62 {
63  CFHDContext *s = avctx->priv_data;
64 
65  avctx->bits_per_raw_sample = 10;
66  s->avctx = avctx;
67 
68  return ff_cfhd_init_vlcs(s);
69 }
70 
71 static void init_plane_defaults(CFHDContext *s)
72 {
73  s->subband_num = 0;
74  s->level = 0;
75  s->subband_num_actual = 0;
76 }
77 
78 static void init_peak_table_defaults(CFHDContext *s)
79 {
80  s->peak.level = 0;
81  s->peak.offset = 0;
82  memset(&s->peak.base, 0, sizeof(s->peak.base));
83 }
84 
85 static void init_frame_defaults(CFHDContext *s)
86 {
87  s->coded_width = 0;
88  s->coded_height = 0;
89  s->cropped_height = 0;
90  s->bpc = 10;
91  s->channel_cnt = 4;
92  s->subband_cnt = SUBBAND_COUNT;
93  s->channel_num = 0;
94  s->lowpass_precision = 16;
95  s->quantisation = 1;
96  s->wavelet_depth = 3;
97  s->pshift = 1;
98  s->codebook = 0;
99  s->difference_coding = 0;
100  s->progressive = 0;
101  init_plane_defaults(s);
102  init_peak_table_defaults(s);
103 }
104 
105 /* TODO: merge with VLC tables or use LUT */
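/* For codebooks 0 and 1, small magnitudes are expanded with a cubic companding
 * curve (abslevel + 768 * abslevel^3 / 255^3) before the quantiser is applied;
 * larger values and any other codebook are simply multiplied by the quantiser. */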
106 static inline int dequant_and_decompand(int level, int quantisation, int codebook)
107 {
108  if (codebook == 0 || codebook == 1) {
109  int64_t abslevel = abs(level);
110  if (level < 264)
111  return (abslevel + ((768 * abslevel * abslevel * abslevel) / (255 * 255 * 255))) *
112  FFSIGN(level) * quantisation;
113  else
114  return level * quantisation;
115  } else
116  return level * quantisation;
117 }
118 
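/* Bands flagged with difference coding (tag 72, bit 4) store horizontal deltas;
 * this integrates each row back into absolute coefficient values. */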
119 static inline void difference_coding(int16_t *band, int width, int height)
120 {
121 
122  int i,j;
123  for (i = 0; i < height; i++) {
124  for (j = 1; j < width; j++) {
125  band[j] += band[j-1];
126  }
127  band += width;
128  }
129 }
130 
131 static inline void peak_table(int16_t *band, Peak *peak, int length)
132 {
133  int i;
134  for (i = 0; i < length; i++)
135  if (abs(band[i]) > peak->level)
136  band[i] = bytestream2_get_le16(&peak->base);
137 }
138 
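/* Alpha samples are companded by the encoder; this reverses it: remove the DC
 * offset, scale by ALPHA_COMPAND_GAIN / 2^13 (the <<3, *gain, >>16 sequence)
 * and clip to 12 bits. */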
139 static inline void process_alpha(int16_t *alpha, int width)
140 {
141  int i, channel;
142  for (i = 0; i < width; i++) {
143  channel = alpha[i];
144  channel -= ALPHA_COMPAND_DC_OFFSET;
145  channel <<= 3;
146  channel *= ALPHA_COMPAND_GAIN;
147  channel >>= 16;
148  channel = av_clip_uintp2(channel, 12);
149  alpha[i] = channel;
150  }
151 }
152 
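/* The single decoded Bayer plane appears to hold, per 2x2 quad, a green value
 * plus R-G, B-G and G1-G2 differences biased around 2048; process_bayer()
 * converts these back to R/G1/G2/B and scales the 12-bit results to 16 bits. */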
153 static inline void process_bayer(AVFrame *frame)
154 {
155  const int linesize = frame->linesize[0];
156  uint16_t *r = (uint16_t *)frame->data[0];
157  uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
158  uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
159  uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
160  const int mid = 2048;
161 
162  for (int y = 0; y < frame->height >> 1; y++) {
163  for (int x = 0; x < frame->width; x += 2) {
164  int R, G1, G2, B;
165  int g, rg, bg, gd;
166 
167  g = r[x];
168  rg = g1[x];
169  bg = g2[x];
170  gd = b[x];
171  gd -= mid;
172 
173  R = (rg - mid) * 2 + g;
174  G1 = g + gd;
175  G2 = g - gd;
176  B = (bg - mid) * 2 + g;
177 
178  R = av_clip_uintp2(R * 16, 16);
179  G1 = av_clip_uintp2(G1 * 16, 16);
180  G2 = av_clip_uintp2(G2 * 16, 16);
181  B = av_clip_uintp2(B * 16, 16);
182 
183  r[x] = R;
184  g1[x] = G1;
185  g2[x] = G2;
186  b[x] = B;
187  }
188 
189  r += linesize;
190  g1 += linesize;
191  g2 += linesize;
192  b += linesize;
193  }
194 }
195 
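/* One inverse wavelet reconstruction pass: each low/high pair produces two
 * output samples, using (+1, 0, -1)/8 prediction taps in the interior and the
 * asymmetric (11, -4, 1)/8 and (5, 4, -1)/8 taps at the borders (presumably
 * the inverse of a 2/6-tap wavelet). */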
196 static inline void filter(int16_t *output, ptrdiff_t out_stride,
197  int16_t *low, ptrdiff_t low_stride,
198  int16_t *high, ptrdiff_t high_stride,
199  int len, int clip)
200 {
201  int16_t tmp;
202  int i;
203 
204  for (i = 0; i < len; i++) {
205  if (i == 0) {
206  tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
207  output[(2*i+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
208  if (clip)
209  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
210 
211  tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
212  output[(2*i+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
213  if (clip)
214  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
215  } else if (i == len-1) {
216  tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
217  output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
218  if (clip)
219  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
220 
221  tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
222  output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
223  if (clip)
224  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
225  } else {
226  tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
227  output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
228  if (clip)
229  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
230 
231  tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
232  output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
233  if (clip)
234  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
235  }
236  }
237 }
238 
239 static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
240  int width, int linesize, int plane)
241 {
242  int i;
243  int16_t even, odd;
244  for (i = 0; i < width; i++) {
245  even = (low[i] - high[i])/2;
246  odd = (low[i] + high[i])/2;
247  output[i] = av_clip_uintp2(even, 10);
248  output[i + linesize] = av_clip_uintp2(odd, 10);
249  }
250 }
251 static void horiz_filter(int16_t *output, int16_t *low, int16_t *high,
252  int width)
253 {
254  filter(output, 1, low, 1, high, 1, width, 0);
255 }
256 
257 static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high,
258  int width, int clip)
259 {
260  filter(output, 1, low, 1, high, 1, width, clip);
261 }
262 
263 static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high,
264  int width, int clip)
265 {
266  filter(output, 2, low, 1, high, 1, width, clip);
267 }
268 
269 static void vert_filter(int16_t *output, ptrdiff_t out_stride,
270  int16_t *low, ptrdiff_t low_stride,
271  int16_t *high, ptrdiff_t high_stride, int len)
272 {
273  filter(output, out_stride, low, low_stride, high, high_stride, len, 0);
274 }
275 
276 static void free_buffers(CFHDContext *s)
277 {
278  int i, j;
279 
280  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
281  av_freep(&s->plane[i].idwt_buf);
282  av_freep(&s->plane[i].idwt_tmp);
283 
284  for (j = 0; j < 9; j++)
285  s->plane[i].subband[j] = NULL;
286 
287  for (j = 0; j < 8; j++)
288  s->plane[i].l_h[j] = NULL;
289  }
290  s->a_height = 0;
291  s->a_width = 0;
292 }
293 
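/* Each plane gets a single idwt_buf allocation holding every band of the
 * three-level transform: w8/h8 are the dimensions of the smallest bands,
 * w4/h4 and w2/h2 those of the next two levels, and subband[0..9] point at
 * consecutive slices of that buffer. idwt_tmp backs the intermediate l_h[]
 * rows used during reconstruction; two l_h entries stay NULL because those
 * stages run in-place. */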
294 static int alloc_buffers(AVCodecContext *avctx)
295 {
296  CFHDContext *s = avctx->priv_data;
297  int i, j, ret, planes;
298  int chroma_x_shift, chroma_y_shift;
299  unsigned k;
300 
301  if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
302  s->coded_width *= 2;
303  s->coded_height *= 2;
304  }
305 
306  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
307  return ret;
308  avctx->pix_fmt = s->coded_format;
309 
310  if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
311  &chroma_x_shift,
312  &chroma_y_shift)) < 0)
313  return ret;
314  planes = av_pix_fmt_count_planes(s->coded_format);
315  if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
316  planes = 4;
317  chroma_x_shift = 1;
318  chroma_y_shift = 1;
319  }
320 
321  for (i = 0; i < planes; i++) {
322  int w8, h8, w4, h4, w2, h2;
323  int width = i ? avctx->width >> chroma_x_shift : avctx->width;
324  int height = i ? avctx->height >> chroma_y_shift : avctx->height;
325  ptrdiff_t stride = FFALIGN(width / 8, 8) * 8;
326  if (chroma_y_shift)
327  height = FFALIGN(height / 8, 2) * 8;
328  s->plane[i].width = width;
329  s->plane[i].height = height;
330  s->plane[i].stride = stride;
331 
332  w8 = FFALIGN(s->plane[i].width / 8, 8);
333  h8 = height / 8;
334  w4 = w8 * 2;
335  h4 = h8 * 2;
336  w2 = w4 * 2;
337  h2 = h4 * 2;
338 
339  s->plane[i].idwt_buf =
340  av_mallocz_array(height * stride, sizeof(*s->plane[i].idwt_buf));
341  s->plane[i].idwt_tmp =
342  av_malloc_array(height * stride, sizeof(*s->plane[i].idwt_tmp));
343  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
344  return AVERROR(ENOMEM);
345 
346  s->plane[i].subband[0] = s->plane[i].idwt_buf;
347  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
348  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
349  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
350  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
351  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
352  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
353  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
354  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
355  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
356 
357  for (j = 0; j < DWT_LEVELS; j++) {
358  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
359  s->plane[i].band[j][k].a_width = w8 << j;
360  s->plane[i].band[j][k].a_height = h8 << j;
361  }
362  }
363 
364  /* ll2 and ll1 commented out because they are done in-place */
365  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
366  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
367  // s->plane[i].l_h[2] = ll2;
368  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
369  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
370  // s->plane[i].l_h[5] = ll1;
371  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
372  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
373  }
374 
375  s->a_height = s->coded_height;
376  s->a_width = s->coded_width;
377  s->a_format = s->coded_format;
378 
379  return 0;
380 }
381 
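/* Each packet is a stream of 16-bit tag / 16-bit value pairs. Header tags set
 * dimensions, pixel format and per-band parameters; tag 4 with value 0x1a4a
 * marks the end of the header, tag 4 with value 0xf0f is followed by raw
 * lowpass coefficients, and tag 55 introduces the run-length/VLC coded
 * highpass bands. Once all bands are read, each plane is reconstructed with
 * three inverse DWT levels below. */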
382 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
383  AVPacket *avpkt)
384 {
385  CFHDContext *s = avctx->priv_data;
386  GetByteContext gb;
387  ThreadFrame frame = { .f = data };
388  AVFrame *pic = data;
389  int ret = 0, i, j, planes, plane, got_buffer = 0;
390  int16_t *coeff_data;
391 
392  s->coded_format = AV_PIX_FMT_YUV422P10;
393  init_frame_defaults(s);
394  planes = av_pix_fmt_count_planes(s->coded_format);
395 
396  bytestream2_init(&gb, avpkt->data, avpkt->size);
397 
398  while (bytestream2_get_bytes_left(&gb) > 4) {
399  /* Bit weird but implement the tag parsing as the spec says */
400  uint16_t tagu = bytestream2_get_be16(&gb);
401  int16_t tag = (int16_t)tagu;
402  int8_t tag8 = (int8_t)(tagu >> 8);
403  uint16_t abstag = abs(tag);
404  int8_t abs_tag8 = abs(tag8);
405  uint16_t data = bytestream2_get_be16(&gb);
406  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
407  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
408  } else if (tag == SampleFlags) {
409  av_log(avctx, AV_LOG_DEBUG, "Progressive?%"PRIu16"\n", data);
410  s->progressive = data & 0x0001;
411  } else if (tag == ImageWidth) {
412  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
413  s->coded_width = data;
414  } else if (tag == ImageHeight) {
415  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
416  s->coded_height = data;
417  } else if (tag == 101) {
418  av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
419  if (data < 1 || data > 31) {
420  av_log(avctx, AV_LOG_ERROR, "Bits per component %d is invalid\n", data);
421  ret = AVERROR(EINVAL);
422  break;
423  }
424  s->bpc = data;
425  } else if (tag == ChannelCount) {
426  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
427  s->channel_cnt = data;
428  if (data > 4) {
429  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
430  ret = AVERROR_PATCHWELCOME;
431  break;
432  }
433  } else if (tag == SubbandCount) {
434  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
435  if (data != SUBBAND_COUNT) {
436  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
437  ret = AVERROR_PATCHWELCOME;
438  break;
439  }
440  } else if (tag == ChannelNumber) {
441  s->channel_num = data;
442  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
443  if (s->channel_num >= planes) {
444  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
445  ret = AVERROR(EINVAL);
446  break;
447  }
448  init_plane_defaults(s);
449  } else if (tag == SubbandNumber) {
450  if (s->subband_num != 0 && data == 1) // hack
451  s->level++;
452  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
453  s->subband_num = data;
454  if (s->level >= DWT_LEVELS) {
455  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
456  ret = AVERROR(EINVAL);
457  break;
458  }
459  if (s->subband_num > 3) {
460  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
461  ret = AVERROR(EINVAL);
462  break;
463  }
464  } else if (tag == 51) {
465  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
466  s->subband_num_actual = data;
467  if (s->subband_num_actual >= 10) {
468  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
469  ret = AVERROR(EINVAL);
470  break;
471  }
472  } else if (tag == LowpassPrecision)
473  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
474  else if (tag == Quantization) {
475  s->quantisation = data;
476  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
477  } else if (tag == PrescaleShift) {
478  s->prescale_shift[0] = (data >> 0) & 0x7;
479  s->prescale_shift[1] = (data >> 3) & 0x7;
480  s->prescale_shift[2] = (data >> 6) & 0x7;
481  av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
482  } else if (tag == 27) {
483  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
484  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
485  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
486  ret = AVERROR(EINVAL);
487  break;
488  }
489  s->plane[s->channel_num].band[0][0].width = data;
490  s->plane[s->channel_num].band[0][0].stride = data;
491  } else if (tag == 28) {
492  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
493  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
494  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
495  ret = AVERROR(EINVAL);
496  break;
497  }
498  s->plane[s->channel_num].band[0][0].height = data;
499  } else if (tag == 1)
500  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
501  else if (tag == 10) {
502  if (data != 0) {
503  avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
504  ret = AVERROR_PATCHWELCOME;
505  break;
506  }
507  av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
508  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
509  if (abstag == 0x4001)
510  s->peak.level = 0;
511  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
512  bytestream2_skipu(&gb, data * 4);
513  } else if (tag == 23) {
514  av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
515  avpriv_report_missing_feature(avctx, "Skip frame");
516  ret = AVERROR_PATCHWELCOME;
517  break;
518  } else if (tag == 2) {
519  av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
520  if (data > bytestream2_get_bytes_left(&gb) / 4) {
521  av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
522  ret = AVERROR_INVALIDDATA;
523  break;
524  }
525  for (i = 0; i < data; i++) {
526  uint16_t tag2 = bytestream2_get_be16(&gb);
527  uint16_t val2 = bytestream2_get_be16(&gb);
528  av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
529  }
530  } else if (tag == 41) {
531  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
532  if (data < 3) {
533  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
534  ret = AVERROR(EINVAL);
535  break;
536  }
537  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
538  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
539  } else if (tag == 42) {
540  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
541  if (data < 3) {
542  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
543  ret = AVERROR(EINVAL);
544  break;
545  }
546  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
547  } else if (tag == 49) {
548  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
549  if (data < 3) {
550  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
551  ret = AVERROR(EINVAL);
552  break;
553  }
554  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
555  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
556  } else if (tag == 50) {
557  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
558  if (data < 3) {
559  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
560  ret = AVERROR(EINVAL);
561  break;
562  }
563  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
564  } else if (tag == 71) {
565  s->codebook = data;
566  av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
567  } else if (tag == 72) {
568  s->codebook = data & 0xf;
569  s->difference_coding = (data >> 4) & 1;
570  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
571  } else if (tag == 70) {
572  av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
573  if (!(data == 10 || data == 12)) {
574  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
575  ret = AVERROR(EINVAL);
576  break;
577  }
578  s->bpc = data;
579  } else if (tag == 84) {
580  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
581  if (data == 1) {
582  s->coded_format = AV_PIX_FMT_YUV422P10;
583  } else if (data == 2) {
584  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
585  } else if (data == 3) {
586  s->coded_format = AV_PIX_FMT_GBRP12;
587  } else if (data == 4) {
588  s->coded_format = AV_PIX_FMT_GBRAP12;
589  } else {
590  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
591  ret = AVERROR_PATCHWELCOME;
592  break;
593  }
594  planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
595  } else if (tag == -85) {
596  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
597  s->cropped_height = data;
598  } else if (tag == -75) {
599  s->peak.offset &= ~0xffff;
600  s->peak.offset |= (data & 0xffff);
601  s->peak.base = gb;
602  s->peak.level = 0;
603  } else if (tag == -76) {
604  s->peak.offset &= 0xffff;
605  s->peak.offset |= (data & 0xffffU)<<16;
606  s->peak.base = gb;
607  s->peak.level = 0;
608  } else if (tag == -74 && s->peak.offset) {
609  s->peak.level = data;
610  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
611  } else
612  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
613 
614  /* Some kind of end of header tag */
615  if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height &&
616  s->coded_format != AV_PIX_FMT_NONE) {
617  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
618  s->a_format != s->coded_format) {
619  free_buffers(s);
620  if ((ret = alloc_buffers(avctx)) < 0) {
621  free_buffers(s);
622  return ret;
623  }
624  }
625  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
626  if (ret < 0)
627  return ret;
628  if (s->cropped_height) {
629  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
630  if (avctx->height < height)
631  return AVERROR_INVALIDDATA;
632  avctx->height = height;
633  }
634  frame.f->width =
635  frame.f->height = 0;
636 
637  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
638  return ret;
639 
640  s->coded_width = 0;
641  s->coded_height = 0;
642  s->coded_format = AV_PIX_FMT_NONE;
643  got_buffer = 1;
644  }
645  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
646 
647  /* Lowpass coefficients */
648  if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) {
649  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
650  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
651  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
652  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
653 
654  if (!got_buffer) {
655  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
656  ret = AVERROR(EINVAL);
657  goto end;
658  }
659 
660  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
661  lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
662  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
663  ret = AVERROR(EINVAL);
664  goto end;
665  }
666 
667  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
668  for (i = 0; i < lowpass_height; i++) {
669  for (j = 0; j < lowpass_width; j++)
670  coeff_data[j] = bytestream2_get_be16u(&gb);
671 
672  coeff_data += lowpass_width;
673  }
674 
675  /* Align to mod-4 position to continue reading tags */
676  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
677 
678  /* Copy last line of coefficients if odd height */
679  if (lowpass_height & 1) {
680  memcpy(&coeff_data[lowpass_height * lowpass_width],
681  &coeff_data[(lowpass_height - 1) * lowpass_width],
682  lowpass_width * sizeof(*coeff_data));
683  }
684 
685  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
686  }
687 
688  if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) {
689  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
690  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
691  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
692  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
693  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
694  int expected;
695  int a_expected = highpass_a_height * highpass_a_width;
696  int level, run, coeff;
697  int count = 0, bytes;
698 
699  if (!got_buffer) {
700  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
701  ret = AVERROR(EINVAL);
702  goto end;
703  }
704 
705  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
706  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
707  ret = AVERROR(EINVAL);
708  goto end;
709  }
710  expected = highpass_height * highpass_stride;
711 
712  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
713 
714  init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8);
715  {
716  OPEN_READER(re, &s->gb);
717  if (!s->codebook) {
718  while (1) {
719  UPDATE_CACHE(re, &s->gb);
720  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
721  VLC_BITS, 3, 1);
722 
723  /* escape */
724  if (level == 64)
725  break;
726 
727  count += run;
728 
729  if (count > expected)
730  break;
731 
732  coeff = dequant_and_decompand(level, s->quantisation, 0);
733  for (i = 0; i < run; i++)
734  *coeff_data++ = coeff;
735  }
736  } else {
737  while (1) {
738  UPDATE_CACHE(re, &s->gb);
739  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
740  VLC_BITS, 3, 1);
741 
742  /* escape */
743  if (level == 255 && run == 2)
744  break;
745 
746  count += run;
747 
748  if (count > expected)
749  break;
750 
751  coeff = dequant_and_decompand(level, s->quantisation, s->codebook);
752  for (i = 0; i < run; i++)
753  *coeff_data++ = coeff;
754  }
755  }
756  CLOSE_READER(re, &s->gb);
757  }
758 
759  if (count > expected) {
760  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
761  ret = AVERROR(EINVAL);
762  goto end;
763  }
764  if (s->peak.level)
765  peak_table(coeff_data - count, &s->peak, count);
766  if (s->difference_coding)
767  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
768 
769  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
770  if (bytes > bytestream2_get_bytes_left(&gb)) {
771  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
772  ret = AVERROR(EINVAL);
773  goto end;
774  } else
775  bytestream2_seek(&gb, bytes, SEEK_CUR);
776 
777  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
778  s->codebook = 0;
779 
780  /* Copy last line of coefficients if odd height */
781  if (highpass_height & 1) {
782  memcpy(&coeff_data[highpass_height * highpass_stride],
783  &coeff_data[(highpass_height - 1) * highpass_stride],
784  highpass_stride * sizeof(*coeff_data));
785  }
786  }
787  }
788 
789  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
790  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
791  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
792  ret = AVERROR(EINVAL);
793  goto end;
794  }
795 
796  if (!got_buffer) {
797  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
798  ret = AVERROR(EINVAL);
799  goto end;
800  }
801 
802  planes = av_pix_fmt_count_planes(avctx->pix_fmt);
803  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
804  if (!s->progressive)
805  return AVERROR_INVALIDDATA;
806  planes = 4;
807  }
808 
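/* Per-plane inverse transform: levels 1 and 2 each run two vertical passes
 * into l_h[] followed by a horizontal pass written back over subband[0];
 * level 3 writes its horizontal pass straight into the output frame (with
 * clipping), or, for interlaced samples, does two horizontal passes and then
 * combines the fields with interlaced_vertical_filter(). */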
809  for (plane = 0; plane < planes && !ret; plane++) {
810  /* level 1 */
811  int lowpass_height = s->plane[plane].band[0][0].height;
812  int lowpass_width = s->plane[plane].band[0][0].width;
813  int highpass_stride = s->plane[plane].band[0][1].stride;
814  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
815  ptrdiff_t dst_linesize;
816  int16_t *low, *high, *output, *dst;
817 
818  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
819  act_plane = 0;
820  dst_linesize = pic->linesize[act_plane];
821  } else {
822  dst_linesize = pic->linesize[act_plane] / 2;
823  }
824 
825  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
826  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
827  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
828  ret = AVERROR(EINVAL);
829  goto end;
830  }
831 
832  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
833 
834  low = s->plane[plane].subband[0];
835  high = s->plane[plane].subband[2];
836  output = s->plane[plane].l_h[0];
837  for (i = 0; i < lowpass_width; i++) {
838  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
839  low++;
840  high++;
841  output++;
842  }
843 
844  low = s->plane[plane].subband[1];
845  high = s->plane[plane].subband[3];
846  output = s->plane[plane].l_h[1];
847 
848  for (i = 0; i < lowpass_width; i++) {
849  // note the stride of "low" is highpass_stride
850  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
851  low++;
852  high++;
853  output++;
854  }
855 
856  low = s->plane[plane].l_h[0];
857  high = s->plane[plane].l_h[1];
858  output = s->plane[plane].subband[0];
859  for (i = 0; i < lowpass_height * 2; i++) {
860  horiz_filter(output, low, high, lowpass_width);
861  low += lowpass_width;
862  high += lowpass_width;
863  output += lowpass_width * 2;
864  }
865  if (s->bpc == 12) {
866  output = s->plane[plane].subband[0];
867  for (i = 0; i < lowpass_height * 2; i++) {
868  for (j = 0; j < lowpass_width * 2; j++)
869  output[j] *= 4;
870 
871  output += lowpass_width * 2;
872  }
873  }
874 
875  /* level 2 */
876  lowpass_height = s->plane[plane].band[1][1].height;
877  lowpass_width = s->plane[plane].band[1][1].width;
878  highpass_stride = s->plane[plane].band[1][1].stride;
879 
880  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
881  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
882  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
883  ret = AVERROR(EINVAL);
884  goto end;
885  }
886 
887  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
888 
889  low = s->plane[plane].subband[0];
890  high = s->plane[plane].subband[5];
891  output = s->plane[plane].l_h[3];
892  for (i = 0; i < lowpass_width; i++) {
893  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
894  low++;
895  high++;
896  output++;
897  }
898 
899  low = s->plane[plane].subband[4];
900  high = s->plane[plane].subband[6];
901  output = s->plane[plane].l_h[4];
902  for (i = 0; i < lowpass_width; i++) {
903  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
904  low++;
905  high++;
906  output++;
907  }
908 
909  low = s->plane[plane].l_h[3];
910  high = s->plane[plane].l_h[4];
911  output = s->plane[plane].subband[0];
912  for (i = 0; i < lowpass_height * 2; i++) {
913  horiz_filter(output, low, high, lowpass_width);
914  low += lowpass_width;
915  high += lowpass_width;
916  output += lowpass_width * 2;
917  }
918 
919  output = s->plane[plane].subband[0];
920  for (i = 0; i < lowpass_height * 2; i++) {
921  for (j = 0; j < lowpass_width * 2; j++)
922  output[j] *= 4;
923 
924  output += lowpass_width * 2;
925  }
926 
927  /* level 3 */
928  lowpass_height = s->plane[plane].band[2][1].height;
929  lowpass_width = s->plane[plane].band[2][1].width;
930  highpass_stride = s->plane[plane].band[2][1].stride;
931 
932  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
933  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
934  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
935  ret = AVERROR(EINVAL);
936  goto end;
937  }
938 
939  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
940  if (s->progressive) {
941  low = s->plane[plane].subband[0];
942  high = s->plane[plane].subband[8];
943  output = s->plane[plane].l_h[6];
944  for (i = 0; i < lowpass_width; i++) {
945  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
946  low++;
947  high++;
948  output++;
949  }
950 
951  low = s->plane[plane].subband[7];
952  high = s->plane[plane].subband[9];
953  output = s->plane[plane].l_h[7];
954  for (i = 0; i < lowpass_width; i++) {
955  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
956  low++;
957  high++;
958  output++;
959  }
960 
961  dst = (int16_t *)pic->data[act_plane];
962  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
963  if (plane & 1)
964  dst++;
965  if (plane > 1)
966  dst += pic->linesize[act_plane] >> 1;
967  }
968  low = s->plane[plane].l_h[6];
969  high = s->plane[plane].l_h[7];
970 
971  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
972  (lowpass_height * 2 > avctx->coded_height / 2 ||
973  lowpass_width * 2 > avctx->coded_width / 2 )
974  ) {
975  ret = AVERROR_INVALIDDATA;
976  goto end;
977  }
978 
979  for (i = 0; i < lowpass_height * 2; i++) {
980  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
981  horiz_filter_clip_bayer(dst, low, high, lowpass_width, s->bpc);
982  else
983  horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
984  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
985  process_alpha(dst, lowpass_width * 2);
986  low += lowpass_width;
987  high += lowpass_width;
988  dst += dst_linesize;
989  }
990  } else {
991  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
992  pic->interlaced_frame = 1;
993  low = s->plane[plane].subband[0];
994  high = s->plane[plane].subband[7];
995  output = s->plane[plane].l_h[6];
996  for (i = 0; i < lowpass_height; i++) {
997  horiz_filter(output, low, high, lowpass_width);
998  low += lowpass_width;
999  high += lowpass_width;
1000  output += lowpass_width * 2;
1001  }
1002 
1003  low = s->plane[plane].subband[8];
1004  high = s->plane[plane].subband[9];
1005  output = s->plane[plane].l_h[7];
1006  for (i = 0; i < lowpass_height; i++) {
1007  horiz_filter(output, low, high, lowpass_width);
1008  low += lowpass_width;
1009  high += lowpass_width;
1010  output += lowpass_width * 2;
1011  }
1012 
1013  dst = (int16_t *)pic->data[act_plane];
1014  low = s->plane[plane].l_h[6];
1015  high = s->plane[plane].l_h[7];
1016  for (i = 0; i < lowpass_height; i++) {
1017  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1018  low += lowpass_width * 2;
1019  high += lowpass_width * 2;
1020  dst += pic->linesize[act_plane];
1021  }
1022  }
1023  }
1024 
1025 
1026  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1027  process_bayer(pic);
1028 end:
1029  if (ret < 0)
1030  return ret;
1031 
1032  *got_frame = 1;
1033  return avpkt->size;
1034 }
1035 
1036 static av_cold int cfhd_close(AVCodecContext *avctx)
1037 {
1038  CFHDContext *s = avctx->priv_data;
1039 
1040  free_buffers(s);
1041 
1042  if (!avctx->internal->is_copy) {
1043  ff_free_vlc(&s->vlc_9);
1044  ff_free_vlc(&s->vlc_18);
1045  }
1046 
1047  return 0;
1048 }
1049 
1050 AVCodec ff_cfhd_decoder = {
1051  .name = "cfhd",
1052  .long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
1053  .type = AVMEDIA_TYPE_VIDEO,
1054  .id = AV_CODEC_ID_CFHD,
1055  .priv_data_size = sizeof(CFHDContext),
1056  .init = cfhd_init,
1057  .close = cfhd_close,
1058  .decode = cfhd_decode,
1059  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1060  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
1061 };