FFmpeg
cfhd.c
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
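/*
 * CineForm sample metadata is a stream of 16-bit tag / 16-bit value pairs.
 * The enum below names the tags this decoder understands; the numeric
 * values appear to follow the CineForm/VC-5 tag assignments.
 */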
 43 enum CFHDParam {
 44  ChannelCount = 12,
 45  SubbandCount = 14,
 46  ImageWidth = 20,
 47  ImageHeight = 21,
 48  LowpassPrecision = 35,
 49  SubbandNumber = 48,
 50  Quantization = 53,
 51  ChannelNumber = 62,
 52  SampleFlags = 68,
 53  BitsPerComponent = 101,
 54  ChannelWidth = 104,
 55  ChannelHeight = 105,
 56  PrescaleShift = 177,
 57 };
58 
59 
60 
61 static av_cold int cfhd_init(AVCodecContext *avctx)
62 {
63  CFHDContext *s = avctx->priv_data;
64 
65  avctx->bits_per_raw_sample = 10;
66  s->avctx = avctx;
67 
68  return ff_cfhd_init_vlcs(s);
69 }
70 
71 static void init_plane_defaults(CFHDContext *s)
72 {
73  s->subband_num = 0;
74  s->level = 0;
75  s->subband_num_actual = 0;
76 }
77 
78 static void init_peak_table_defaults(CFHDContext *s)
79 {
80  s->peak.level = 0;
81  s->peak.offset = 0;
82  memset(&s->peak.base, 0, sizeof(s->peak.base));
83 }
84 
85 static void init_frame_defaults(CFHDContext *s)
86 {
87  s->coded_width = 0;
88  s->coded_height = 0;
89  s->cropped_height = 0;
90  s->bpc = 10;
91  s->channel_cnt = 4;
92  s->subband_cnt = SUBBAND_COUNT;
93  s->channel_num = 0;
94  s->lowpass_precision = 16;
95  s->quantisation = 1;
96  s->wavelet_depth = 3;
97  s->pshift = 1;
98  s->codebook = 0;
99  s->difference_coding = 0;
100  s->progressive = 0;
101  init_plane_defaults(s);
102  init_peak_table_defaults(s);
103 }
104 
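/*
 * Inverse quantisation with decompanding: for codebooks 0 and 1, levels
 * below the 264 threshold get an extra cubic term
 * (768 * |level|^3 / 255^3) before being scaled by the quantiser;
 * everything else is scaled linearly.
 */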
105 /* TODO: merge with VLC tables or use LUT */
106 static inline int dequant_and_decompand(int level, int quantisation, int codebook)
107 {
108  if (codebook == 0 || codebook == 1) {
109  int64_t abslevel = abs(level);
110  if (level < 264)
111  return (abslevel + ((768 * abslevel * abslevel * abslevel) / (255 * 255 * 255))) *
112  FFSIGN(level) * quantisation;
113  else
114  return level * quantisation;
115  } else
116  return level * quantisation;
117 }
118 
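/*
 * Subbands flagged as difference-coded store horizontal deltas; undo them
 * with a running sum along each row.
 */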
119 static inline void difference_coding(int16_t *band, int width, int height)
120 {
121 
122  int i,j;
123  for (i = 0; i < height; i++) {
124  for (j = 1; j < width; j++) {
125  band[j] += band[j-1];
126  }
127  band += width;
128  }
129 }
130 
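/*
 * Peak table: any coefficient whose magnitude exceeds peak->level is
 * replaced by the next 16-bit value read from the peak table embedded in
 * the bitstream (peak->base).
 */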
131 static inline void peak_table(int16_t *band, Peak *peak, int length)
132 {
133  int i;
134  for (i = 0; i < length; i++)
135  if (abs(band[i]) > peak->level)
136  band[i] = bytestream2_get_le16(&peak->base);
137 }
138 
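/*
 * Alpha companding: remove the DC offset, apply the fixed-point gain
 * (the <<3, *ALPHA_COMPAND_GAIN, >>16 sequence) and clip the result to
 * 12 bits, expanding the coded alpha back to full range.
 */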
139 static inline void process_alpha(int16_t *alpha, int width)
140 {
141  int i, channel;
142  for (i = 0; i < width; i++) {
143  channel = alpha[i];
144  channel -= ALPHA_COMPAND_DC_OFFSET;
145  channel <<= 3;
146  channel *= ALPHA_COMPAND_GAIN;
147  channel >>= 16;
148  channel = av_clip_uintp2(channel, 12);
149  alpha[i] = channel;
150  }
151 }
152 
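/*
 * Inverse of the 2-6 wavelet filter used by CineForm: each low/high
 * coefficient pair reconstructs one even and one odd output sample.
 * For interior samples:
 *   even = (((low[i-1] - low[i+1] + 4) >> 3) + low[i] + high[i]) >> 1
 *   odd  = (((low[i+1] - low[i-1] + 4) >> 3) + low[i] - high[i]) >> 1
 * The first and last pairs use the one-sided (11, -4, 1) and (5, 4, -1)
 * weightings seen below, and the result is optionally clipped to
 * 'clip' bits.
 */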
153 static inline void filter(int16_t *output, ptrdiff_t out_stride,
154  int16_t *low, ptrdiff_t low_stride,
155  int16_t *high, ptrdiff_t high_stride,
156  int len, int clip)
157 {
158  int16_t tmp;
159  int i;
160 
161  for (i = 0; i < len; i++) {
162  if (i == 0) {
163  tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
164  output[(2*i+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
165  if (clip)
166  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
167 
168  tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
169  output[(2*i+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
170  if (clip)
171  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
172  } else if (i == len-1) {
173  tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
174  output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
175  if (clip)
176  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
177 
178  tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
179  output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
180  if (clip)
181  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
182  } else {
183  tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
184  output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
185  if (clip)
186  output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
187 
188  tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
189  output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
190  if (clip)
191  output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
192  }
193  }
194 }
195 
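/*
 * Interlaced final stage: the low band carries the sum of each pair of
 * field lines and the high band their difference, so the even and odd
 * output lines are recovered with a simple Haar-style reconstruction
 * and clipped to 10 bits.
 */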
196 static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
197  int width, int linesize, int plane)
198 {
199  int i;
200  int16_t even, odd;
201  for (i = 0; i < width; i++) {
202  even = (low[i] - high[i])/2;
203  odd = (low[i] + high[i])/2;
204  output[i] = av_clip_uintp2(even, 10);
205  output[i + linesize] = av_clip_uintp2(odd, 10);
206  }
207 }
208 static void horiz_filter(int16_t *output, int16_t *low, int16_t *high,
209  int width)
210 {
211  filter(output, 1, low, 1, high, 1, width, 0);
212 }
213 
214 static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high,
215  int width, int clip)
216 {
217  filter(output, 1, low, 1, high, 1, width, clip);
218 }
219 
220 static void vert_filter(int16_t *output, ptrdiff_t out_stride,
221  int16_t *low, ptrdiff_t low_stride,
222  int16_t *high, ptrdiff_t high_stride, int len)
223 {
224  filter(output, out_stride, low, low_stride, high, high_stride, len, 0);
225 }
226 
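/* Release the per-plane IDWT buffers and clear the cached subband and l_h pointers. */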
227 static void free_buffers(CFHDContext *s)
228 {
229  int i, j;
230 
231  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
232  av_freep(&s->plane[i].idwt_buf);
233  av_freep(&s->plane[i].idwt_tmp);
234 
235  for (j = 0; j < 9; j++)
236  s->plane[i].subband[j] = NULL;
237 
238  for (j = 0; j < 8; j++)
239  s->plane[i].l_h[j] = NULL;
240  }
241  s->a_height = 0;
242  s->a_width = 0;
243 }
244 
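/*
 * Allocate the per-plane working memory: idwt_buf holds the subbands
 * (pointers 0-9 laid out at the w8xh8, w4xh4 and w2xh2 level sizes) and
 * doubles as the reconstruction target, while idwt_tmp provides the
 * intermediate low/high buffers (l_h) used between the vertical and
 * horizontal filter passes.
 */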
245 static int alloc_buffers(AVCodecContext *avctx)
246 {
247  CFHDContext *s = avctx->priv_data;
248  int i, j, ret, planes;
249  int chroma_x_shift, chroma_y_shift;
250  unsigned k;
251 
252  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
253  return ret;
254  avctx->pix_fmt = s->coded_format;
255 
256  if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
257  &chroma_x_shift,
258  &chroma_y_shift)) < 0)
259  return ret;
260  planes = av_pix_fmt_count_planes(s->coded_format);
261 
262  for (i = 0; i < planes; i++) {
263  int w8, h8, w4, h4, w2, h2;
264  int width = i ? avctx->width >> chroma_x_shift : avctx->width;
265  int height = i ? avctx->height >> chroma_y_shift : avctx->height;
266  ptrdiff_t stride = FFALIGN(width / 8, 8) * 8;
267  if (chroma_y_shift)
268  height = FFALIGN(height / 8, 2) * 8;
269  s->plane[i].width = width;
270  s->plane[i].height = height;
271  s->plane[i].stride = stride;
272 
273  w8 = FFALIGN(s->plane[i].width / 8, 8);
274  h8 = height / 8;
275  w4 = w8 * 2;
276  h4 = h8 * 2;
277  w2 = w4 * 2;
278  h2 = h4 * 2;
279 
280  s->plane[i].idwt_buf =
281  av_mallocz_array(height * stride, sizeof(*s->plane[i].idwt_buf));
282  s->plane[i].idwt_tmp =
283  av_malloc_array(height * stride, sizeof(*s->plane[i].idwt_tmp));
284  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
285  return AVERROR(ENOMEM);
286 
287  s->plane[i].subband[0] = s->plane[i].idwt_buf;
288  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
289  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
290  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
291  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
292  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
293  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
294  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
295  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
296  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
297 
298  for (j = 0; j < DWT_LEVELS; j++) {
299  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
300  s->plane[i].band[j][k].a_width = w8 << j;
301  s->plane[i].band[j][k].a_height = h8 << j;
302  }
303  }
304 
305  /* ll2 and ll1 commented out because they are done in-place */
306  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
307  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
308  // s->plane[i].l_h[2] = ll2;
309  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
310  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
311  // s->plane[i].l_h[5] = ll1;
312  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
313  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
314  }
315 
316  s->a_height = s->coded_height;
317  s->a_width = s->coded_width;
318  s->a_format = s->coded_format;
319 
320  return 0;
321 }
322 
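/*
 * Decode one frame: parse the tag/value header until the end-of-header
 * marker, read the lowpass band and the RLE/VLC-coded highpass subbands
 * into the per-plane buffers, then run the three-level inverse transform
 * to produce the output picture.
 */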
323 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
324  AVPacket *avpkt)
325 {
326  CFHDContext *s = avctx->priv_data;
327  GetByteContext gb;
328  ThreadFrame frame = { .f = data };
329  AVFrame *pic = data;
330  int ret = 0, i, j, planes, plane, got_buffer = 0;
331  int16_t *coeff_data;
332 
333  s->coded_format = AV_PIX_FMT_YUV422P10;
334  init_frame_defaults(s);
335  planes = av_pix_fmt_count_planes(s->coded_format);
336 
337  bytestream2_init(&gb, avpkt->data, avpkt->size);
338 
339  while (bytestream2_get_bytes_left(&gb) > 4) {
340  /* Bit weird but implement the tag parsing as the spec says */
341  uint16_t tagu = bytestream2_get_be16(&gb);
342  int16_t tag = (int16_t)tagu;
343  int8_t tag8 = (int8_t)(tagu >> 8);
344  uint16_t abstag = abs(tag);
345  int8_t abs_tag8 = abs(tag8);
346  uint16_t data = bytestream2_get_be16(&gb);
347  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
348  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
349  } else if (tag == SampleFlags) {
350  av_log(avctx, AV_LOG_DEBUG, "Progressive?%"PRIu16"\n", data);
351  s->progressive = data & 0x0001;
352  } else if (tag == ImageWidth) {
353  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
354  s->coded_width = data;
355  } else if (tag == ImageHeight) {
356  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
357  s->coded_height = data;
358  } else if (tag == 101) {
359  av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
360  if (data < 1 || data > 31) {
361  av_log(avctx, AV_LOG_ERROR, "Bits per component %d is invalid\n", data);
362  ret = AVERROR(EINVAL);
363  break;
364  }
365  s->bpc = data;
366  } else if (tag == ChannelCount) {
367  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
368  s->channel_cnt = data;
369  if (data > 4) {
370  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
371  ret = AVERROR_PATCHWELCOME;
372  break;
373  }
374  } else if (tag == SubbandCount) {
375  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
376  if (data != SUBBAND_COUNT) {
377  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
378  ret = AVERROR_PATCHWELCOME;
379  break;
380  }
381  } else if (tag == ChannelNumber) {
382  s->channel_num = data;
383  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
384  if (s->channel_num >= planes) {
385  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
386  ret = AVERROR(EINVAL);
387  break;
388  }
389  init_plane_defaults(s);
390  } else if (tag == SubbandNumber) {
391  if (s->subband_num != 0 && data == 1) // hack
392  s->level++;
393  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
394  s->subband_num = data;
395  if (s->level >= DWT_LEVELS) {
396  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
397  ret = AVERROR(EINVAL);
398  break;
399  }
400  if (s->subband_num > 3) {
401  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
402  ret = AVERROR(EINVAL);
403  break;
404  }
405  } else if (tag == 51) {
406  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
407  s->subband_num_actual = data;
408  if (s->subband_num_actual >= 10) {
409  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
410  ret = AVERROR(EINVAL);
411  break;
412  }
413  } else if (tag == LowpassPrecision)
414  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
415  else if (tag == Quantization) {
416  s->quantisation = data;
417  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
418  } else if (tag == PrescaleShift) {
419  s->prescale_shift[0] = (data >> 0) & 0x7;
420  s->prescale_shift[1] = (data >> 3) & 0x7;
421  s->prescale_shift[2] = (data >> 6) & 0x7;
422  av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
423  } else if (tag == 27) {
424  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
425  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
426  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
427  ret = AVERROR(EINVAL);
428  break;
429  }
430  s->plane[s->channel_num].band[0][0].width = data;
431  s->plane[s->channel_num].band[0][0].stride = data;
432  } else if (tag == 28) {
433  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
434  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
435  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
436  ret = AVERROR(EINVAL);
437  break;
438  }
439  s->plane[s->channel_num].band[0][0].height = data;
440  } else if (tag == 1)
441  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
442  else if (tag == 10) {
443  if (data != 0) {
444  avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
445  ret = AVERROR_PATCHWELCOME;
446  break;
447  } else if (data == 1) {
448  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
449  ret = AVERROR_PATCHWELCOME;
450  break;
451  }
452  av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
453  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
454  if (abstag == 0x4001)
455  s->peak.level = 0;
456  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
457  bytestream2_skipu(&gb, data * 4);
458  } else if (tag == 23) {
459  av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
460  avpriv_report_missing_feature(avctx, "Skip frame");
461  ret = AVERROR_PATCHWELCOME;
462  break;
463  } else if (tag == 2) {
464  av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
465  if (data > bytestream2_get_bytes_left(&gb) / 4) {
466  av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
467  ret = AVERROR_INVALIDDATA;
468  break;
469  }
470  for (i = 0; i < data; i++) {
471  uint16_t tag2 = bytestream2_get_be16(&gb);
472  uint16_t val2 = bytestream2_get_be16(&gb);
473  av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
474  }
475  } else if (tag == 41) {
476  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
477  if (data < 3) {
478  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
479  ret = AVERROR(EINVAL);
480  break;
481  }
482  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
483  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
484  } else if (tag == 42) {
485  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
486  if (data < 3) {
487  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
488  ret = AVERROR(EINVAL);
489  break;
490  }
491  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
492  } else if (tag == 49) {
493  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
494  if (data < 3) {
495  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
496  ret = AVERROR(EINVAL);
497  break;
498  }
499  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
500  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
501  } else if (tag == 50) {
502  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
503  if (data < 3) {
504  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
505  ret = AVERROR(EINVAL);
506  break;
507  }
508  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
509  } else if (tag == 71) {
510  s->codebook = data;
511  av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
512  } else if (tag == 72) {
513  s->codebook = data & 0xf;
514  s->difference_coding = (data >> 4) & 1;
515  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
516  } else if (tag == 70) {
517  av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
518  if (!(data == 10 || data == 12)) {
519  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
520  ret = AVERROR(EINVAL);
521  break;
522  }
523  s->bpc = data;
524  } else if (tag == 84) {
525  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
526  if (data == 1)
527  s->coded_format = AV_PIX_FMT_YUV422P10;
528  else if (data == 3)
529  s->coded_format = AV_PIX_FMT_GBRP12;
530  else if (data == 4)
531  s->coded_format = AV_PIX_FMT_GBRAP12;
532  else {
533  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
534  ret = AVERROR_PATCHWELCOME;
535  break;
536  }
537  planes = av_pix_fmt_count_planes(s->coded_format);
538  } else if (tag == -85) {
539  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
540  s->cropped_height = data;
541  } else if (tag == -75) {
542  s->peak.offset &= ~0xffff;
543  s->peak.offset |= (data & 0xffff);
544  s->peak.base = gb;
545  s->peak.level = 0;
546  } else if (tag == -76) {
547  s->peak.offset &= 0xffff;
548  s->peak.offset |= (data & 0xffffU)<<16;
549  s->peak.base = gb;
550  s->peak.level = 0;
551  } else if (tag == -74 && s->peak.offset) {
552  s->peak.level = data;
553  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
554  s->peak.offset > 4 + bytestream2_get_bytes_left(&s->peak.base)
555  ) {
556  ret = AVERROR_INVALIDDATA;
557  goto end;
558  }
559  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
560  } else
561  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
562 
563  /* Some kind of end of header tag */
564  if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height &&
565  s->coded_format != AV_PIX_FMT_NONE) {
566  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
567  s->a_format != s->coded_format) {
568  free_buffers(s);
569  if ((ret = alloc_buffers(avctx)) < 0) {
570  free_buffers(s);
571  return ret;
572  }
573  }
574  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
575  if (ret < 0)
576  return ret;
577  if (s->cropped_height)
578  avctx->height = s->cropped_height;
579  frame.f->width =
580  frame.f->height = 0;
581 
582  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
583  return ret;
584 
585  s->coded_width = 0;
586  s->coded_height = 0;
587  s->coded_format = AV_PIX_FMT_NONE;
588  got_buffer = 1;
589  }
590  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
591 
592  /* Lowpass coefficients */
593  if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) {
594  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
595  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
596  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
597  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
598 
599  if (!got_buffer) {
600  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
601  ret = AVERROR(EINVAL);
602  goto end;
603  }
604 
605  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
606  lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
607  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
608  ret = AVERROR(EINVAL);
609  goto end;
610  }
611 
612  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
613  for (i = 0; i < lowpass_height; i++) {
614  for (j = 0; j < lowpass_width; j++)
615  coeff_data[j] = bytestream2_get_be16u(&gb);
616 
617  coeff_data += lowpass_width;
618  }
619 
620  /* Align to mod-4 position to continue reading tags */
621  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
622 
623  /* Copy last line of coefficients if odd height */
624  if (lowpass_height & 1) {
625  memcpy(&coeff_data[lowpass_height * lowpass_width],
626  &coeff_data[(lowpass_height - 1) * lowpass_width],
627  lowpass_width * sizeof(*coeff_data));
628  }
629 
630  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
631  }
632 
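/*
 * Highpass subband payload (tag 55): run/level pairs decoded with either
 * table_9_rl_vlc (codebook 0, escape at level 64) or table_18_rl_vlc
 * (other codebooks, escape at level 255 / run 2); each level is
 * dequantised and decompanded before being stored.
 */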
633  if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) {
634  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
635  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
636  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
637  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
638  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
639  int expected;
640  int a_expected = highpass_a_height * highpass_a_width;
641  int level, run, coeff;
642  int count = 0, bytes;
643 
644  if (!got_buffer) {
645  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
646  ret = AVERROR(EINVAL);
647  goto end;
648  }
649 
650  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
651  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
652  ret = AVERROR(EINVAL);
653  goto end;
654  }
655  expected = highpass_height * highpass_stride;
656 
657  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
658 
659  init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8);
660  {
661  OPEN_READER(re, &s->gb);
662  if (!s->codebook) {
663  while (1) {
664  UPDATE_CACHE(re, &s->gb);
665  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
666  VLC_BITS, 3, 1);
667 
668  /* escape */
669  if (level == 64)
670  break;
671 
672  count += run;
673 
674  if (count > expected)
675  break;
676 
677  coeff = dequant_and_decompand(level, s->quantisation, 0);
678  for (i = 0; i < run; i++)
679  *coeff_data++ = coeff;
680  }
681  } else {
682  while (1) {
683  UPDATE_CACHE(re, &s->gb);
684  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
685  VLC_BITS, 3, 1);
686 
687  /* escape */
688  if (level == 255 && run == 2)
689  break;
690 
691  count += run;
692 
693  if (count > expected)
694  break;
695 
696  coeff = dequant_and_decompand(level, s->quantisation, s->codebook);
697  for (i = 0; i < run; i++)
698  *coeff_data++ = coeff;
699  }
700  }
701  CLOSE_READER(re, &s->gb);
702  }
703 
704  if (count > expected) {
705  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
706  ret = AVERROR(EINVAL);
707  goto end;
708  }
709  if (s->peak.level)
710  peak_table(coeff_data - count, &s->peak, count);
711  if (s->difference_coding)
712  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
713 
714  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
715  if (bytes > bytestream2_get_bytes_left(&gb)) {
716  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
717  ret = AVERROR(EINVAL);
718  goto end;
719  } else
720  bytestream2_seek(&gb, bytes, SEEK_CUR);
721 
722  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
723  s->codebook = 0;
724 
725  /* Copy last line of coefficients if odd height */
726  if (highpass_height & 1) {
727  memcpy(&coeff_data[highpass_height * highpass_stride],
728  &coeff_data[(highpass_height - 1) * highpass_stride],
729  highpass_stride * sizeof(*coeff_data));
730  }
731  }
732  }
733 
734  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
735  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
736  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
737  ret = AVERROR(EINVAL);
738  goto end;
739  }
740 
741  if (!got_buffer) {
742  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
743  ret = AVERROR(EINVAL);
744  goto end;
745  }
746 
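/*
 * Inverse transform, one plane at a time.  Each level runs two vertical
 * passes that merge the stored subbands into the l_h row buffers, then a
 * horizontal pass that writes the next level's lowpass band back into
 * subband[0].  The final level writes (clipped) into the output frame;
 * interlaced frames instead do the horizontal passes first and finish
 * with the Haar-style vertical step above.
 */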
747  planes = av_pix_fmt_count_planes(avctx->pix_fmt);
748  for (plane = 0; plane < planes && !ret; plane++) {
749  /* level 1 */
750  int lowpass_height = s->plane[plane].band[0][0].height;
751  int lowpass_width = s->plane[plane].band[0][0].width;
752  int highpass_stride = s->plane[plane].band[0][1].stride;
753  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
754  int16_t *low, *high, *output, *dst;
755 
756  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
757  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
758  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
759  ret = AVERROR(EINVAL);
760  goto end;
761  }
762 
763  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
764 
765  low = s->plane[plane].subband[0];
766  high = s->plane[plane].subband[2];
767  output = s->plane[plane].l_h[0];
768  for (i = 0; i < lowpass_width; i++) {
769  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
770  low++;
771  high++;
772  output++;
773  }
774 
775  low = s->plane[plane].subband[1];
776  high = s->plane[plane].subband[3];
777  output = s->plane[plane].l_h[1];
778 
779  for (i = 0; i < lowpass_width; i++) {
780  // note the stride of "low" is highpass_stride
781  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
782  low++;
783  high++;
784  output++;
785  }
786 
787  low = s->plane[plane].l_h[0];
788  high = s->plane[plane].l_h[1];
789  output = s->plane[plane].subband[0];
790  for (i = 0; i < lowpass_height * 2; i++) {
791  horiz_filter(output, low, high, lowpass_width);
792  low += lowpass_width;
793  high += lowpass_width;
794  output += lowpass_width * 2;
795  }
796  if (s->bpc == 12) {
797  output = s->plane[plane].subband[0];
798  for (i = 0; i < lowpass_height * 2; i++) {
799  for (j = 0; j < lowpass_width * 2; j++)
800  output[j] *= 4;
801 
802  output += lowpass_width * 2;
803  }
804  }
805 
806  /* level 2 */
807  lowpass_height = s->plane[plane].band[1][1].height;
808  lowpass_width = s->plane[plane].band[1][1].width;
809  highpass_stride = s->plane[plane].band[1][1].stride;
810 
811  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
812  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
813  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
814  ret = AVERROR(EINVAL);
815  goto end;
816  }
817 
818  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
819 
820  low = s->plane[plane].subband[0];
821  high = s->plane[plane].subband[5];
822  output = s->plane[plane].l_h[3];
823  for (i = 0; i < lowpass_width; i++) {
824  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
825  low++;
826  high++;
827  output++;
828  }
829 
830  low = s->plane[plane].subband[4];
831  high = s->plane[plane].subband[6];
832  output = s->plane[plane].l_h[4];
833  for (i = 0; i < lowpass_width; i++) {
834  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
835  low++;
836  high++;
837  output++;
838  }
839 
840  low = s->plane[plane].l_h[3];
841  high = s->plane[plane].l_h[4];
842  output = s->plane[plane].subband[0];
843  for (i = 0; i < lowpass_height * 2; i++) {
844  horiz_filter(output, low, high, lowpass_width);
845  low += lowpass_width;
846  high += lowpass_width;
847  output += lowpass_width * 2;
848  }
849 
850  output = s->plane[plane].subband[0];
851  for (i = 0; i < lowpass_height * 2; i++) {
852  for (j = 0; j < lowpass_width * 2; j++)
853  output[j] *= 4;
854 
855  output += lowpass_width * 2;
856  }
857 
858  /* level 3 */
859  lowpass_height = s->plane[plane].band[2][1].height;
860  lowpass_width = s->plane[plane].band[2][1].width;
861  highpass_stride = s->plane[plane].band[2][1].stride;
862 
863  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
864  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
865  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
866  ret = AVERROR(EINVAL);
867  goto end;
868  }
869 
870  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
871  if (s->progressive) {
872  low = s->plane[plane].subband[0];
873  high = s->plane[plane].subband[8];
874  output = s->plane[plane].l_h[6];
875  for (i = 0; i < lowpass_width; i++) {
876  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
877  low++;
878  high++;
879  output++;
880  }
881 
882  low = s->plane[plane].subband[7];
883  high = s->plane[plane].subband[9];
884  output = s->plane[plane].l_h[7];
885  for (i = 0; i < lowpass_width; i++) {
886  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
887  low++;
888  high++;
889  output++;
890  }
891 
892  dst = (int16_t *)pic->data[act_plane];
893  low = s->plane[plane].l_h[6];
894  high = s->plane[plane].l_h[7];
895  for (i = 0; i < lowpass_height * 2; i++) {
896  horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
897  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
898  process_alpha(dst, lowpass_width * 2);
899  low += lowpass_width;
900  high += lowpass_width;
901  dst += pic->linesize[act_plane] / 2;
902  }
903  } else {
904  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
905  pic->interlaced_frame = 1;
906  low = s->plane[plane].subband[0];
907  high = s->plane[plane].subband[7];
908  output = s->plane[plane].l_h[6];
909  for (i = 0; i < lowpass_height; i++) {
910  horiz_filter(output, low, high, lowpass_width);
911  low += lowpass_width;
912  high += lowpass_width;
913  output += lowpass_width * 2;
914  }
915 
916  low = s->plane[plane].subband[8];
917  high = s->plane[plane].subband[9];
918  output = s->plane[plane].l_h[7];
919  for (i = 0; i < lowpass_height; i++) {
920  horiz_filter(output, low, high, lowpass_width);
921  low += lowpass_width;
922  high += lowpass_width;
923  output += lowpass_width * 2;
924  }
925 
926  dst = (int16_t *)pic->data[act_plane];
927  low = s->plane[plane].l_h[6];
928  high = s->plane[plane].l_h[7];
929  for (i = 0; i < lowpass_height; i++) {
930  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
931  low += lowpass_width * 2;
932  high += lowpass_width * 2;
933  dst += pic->linesize[act_plane];
934  }
935  }
936  }
937 
938 
939 end:
940  if (ret < 0)
941  return ret;
942 
943  *got_frame = 1;
944  return avpkt->size;
945 }
946 
947 static av_cold int cfhd_close(AVCodecContext *avctx)
948 {
949  CFHDContext *s = avctx->priv_data;
950 
951  free_buffers(s);
952 
953  if (!avctx->internal->is_copy) {
954  ff_free_vlc(&s->vlc_9);
955  ff_free_vlc(&s->vlc_18);
956  }
957 
958  return 0;
959 }
960 
961 AVCodec ff_cfhd_decoder = {
962  .name = "cfhd",
963  .long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
964  .type = AVMEDIA_TYPE_VIDEO,
965  .id = AV_CODEC_ID_CFHD,
966  .priv_data_size = sizeof(CFHDContext),
967  .init = cfhd_init,
968  .close = cfhd_close,
969  .decode = cfhd_decode,
970  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
971  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
972 };