FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 enum CFHDParam {
50  ImageWidth = 20,
59  BandHeader = 55,
64  ChannelWidth = 104,
67 };
68 
69 
70 
71 static av_cold int cfhd_init(AVCodecContext *avctx)
72 {
73  CFHDContext *s = avctx->priv_data;
74 
75  avctx->bits_per_raw_sample = 10;
76  s->avctx = avctx;
77 
78  return ff_cfhd_init_vlcs(s);
79 }
80 
82 {
83  s->subband_num = 0;
84  s->level = 0;
85  s->subband_num_actual = 0;
86 }
87 
89 {
90  s->peak.level = 0;
91  s->peak.offset = 0;
92  memset(&s->peak.base, 0, sizeof(s->peak.base));
93 }
94 
96 {
97  s->coded_width = 0;
98  s->coded_height = 0;
99  s->cropped_height = 0;
100  s->bpc = 10;
101  s->channel_cnt = 4;
103  s->channel_num = 0;
104  s->lowpass_precision = 16;
105  s->quantisation = 1;
106  s->wavelet_depth = 3;
107  s->pshift = 1;
108  s->codebook = 0;
109  s->difference_coding = 0;
110  s->progressive = 0;
113 }
114 
115 /* TODO: merge with VLC tables or use LUT */
/* TODO: merge with VLC tables or use LUT */
/* Undo quantisation and the encoder's cubic companding curve.
 * Codebooks 0 and 1 compand coefficient magnitudes below 264; any other
 * codebook, and larger levels, are just linearly scaled by the quantiser.
 * Note the threshold compares the *signed* level, so every negative level
 * takes the companded path — this matches the reference behaviour. */
static inline int dequant_and_decompand(int level, int quantisation, int codebook)
{
    int64_t mag;

    if ((codebook != 0 && codebook != 1) || level >= 264)
        return level * quantisation;

    mag = abs(level);
    return (mag + ((768 * mag * mag * mag) / (255 * 255 * 255))) *
           FFSIGN(level) * quantisation;
}
128 
/* In-place horizontal prefix sum: each coefficient in a row is stored as
 * a delta from its left neighbour, so integrate along every row. */
static inline void difference_coding(int16_t *band, int width, int height)
{
    int row, col;

    for (row = 0; row < height; row++) {
        int16_t *line = band + (ptrdiff_t)row * width;

        for (col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
140 
141 static inline void peak_table(int16_t *band, Peak *peak, int length)
142 {
143  int i;
144  for (i = 0; i < length; i++)
145  if (abs(band[i]) > peak->level)
146  band[i] = bytestream2_get_le16(&peak->base);
147 }
148 
149 static inline void process_alpha(int16_t *alpha, int width)
150 {
151  int i, channel;
152  for (i = 0; i < width; i++) {
153  channel = alpha[i];
154  channel -= ALPHA_COMPAND_DC_OFFSET;
155  channel <<= 3;
156  channel *= ALPHA_COMPAND_GAIN;
157  channel >>= 16;
158  channel = av_clip_uintp2(channel, 12);
159  alpha[i] = channel;
160  }
161 }
162 
/* Final Bayer pass: the decoded plane stores, per 2x2 quad, the values
 * G, R-G, B-G and a G1/G2 difference.  Convert them in place to a plain
 * RGGB mosaic, scaling the 12-bit midpoint-offset values up to 16 bits. */
static inline void process_bayer(AVFrame *frame)
{
    const int linesize = frame->linesize[0]; /* NOTE: in bytes */
    uint16_t *r = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 2048; /* midpoint of the 12-bit sample range */

    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            /* Gather the four stored components of this quad. */
            g = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            /* Reconstruct R/B from their green-difference encoding and
             * split the two greens using the signed G1/G2 difference. */
            R = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B = (bg - mid) * 2 + g;

            /* Scale 12-bit results into the full 16-bit range, clipped. */
            R = av_clip_uintp2(R * 16, 16);
            G1 = av_clip_uintp2(G1 * 16, 16);
            G2 = av_clip_uintp2(G2 * 16, 16);
            B = av_clip_uintp2(B * 16, 16);

            r[x] = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x] = B;
        }

        /* linesize is in bytes but these are uint16_t pointers, so each
         * increment advances two mosaic rows — matching the height>>1
         * iteration count of the outer loop. */
        r += linesize;
        g1 += linesize;
        g2 += linesize;
        b += linesize;
    }
}
205 
/* Generic inverse wavelet reconstruction along one dimension: produces
 * 2*len interleaved output samples from len low-band and len high-band
 * samples.  Interior positions use a (low[i-1] - low[i+1]) / 8 update
 * term; the first and last positions use asymmetric boundary taps
 * ((11,-4,1)/8 and (5,4,-1)/8) since no neighbours exist past the edges.
 * When 'clip' is non-zero each output is clamped to that many unsigned
 * bits. */
static inline void filter(int16_t *output, ptrdiff_t out_stride,
                          int16_t *low, ptrdiff_t low_stride,
                          int16_t *high, ptrdiff_t high_stride,
                          int len, int clip)
{
    int16_t tmp;
    int i;

    for (i = 0; i < len; i++) {
        if (i == 0) {
            /* Left border: extrapolated taps, no i-1 neighbour. */
            tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        } else if (i == len-1) {
            /* Right border: mirrored version of the left-border taps. */
            tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        } else {
            /* Interior: symmetric update term, sign flipped between the
             * even and odd output samples. */
            tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        }
    }
}
248 
/* Reconstruct an even/odd field line pair from one low-band and one
 * high-band line: even = (L - H) / 2, odd = (L + H) / 2, each clipped
 * to 10 bits.  'plane' is unused but kept for interface compatibility. */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                 int width, int linesize, int plane)
{
    int x;

    for (x = 0; x < width; x++) {
        int even_px = (low[x] - high[x]) / 2;
        int odd_px  = (low[x] + high[x]) / 2;

        output[x]            = av_clip_uintp2(even_px, 10);
        output[x + linesize] = av_clip_uintp2(odd_px, 10);
    }
}
/* Horizontal inverse transform: interleave one low/high line pair into
 * 'output' at unit stride, without clipping. */
static void horiz_filter(int16_t *output, int16_t *low, int16_t *high,
                         int width)
{
    filter(output, 1, low, 1, high, 1, width, 0);
}
266 
/* Horizontal inverse transform with output clamped to 'clip' unsigned
 * bits — used for the final level that writes display samples. */
static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high,
                              int width, int clip)
{
    filter(output, 1, low, 1, high, 1, width, clip);
}
272 
/* Bayer variant of horiz_filter_clip: output stride 2, so the
 * reconstructed samples land on every other position of the packed
 * Bayer mosaic line. */
static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high,
                                    int width, int clip)
{
    filter(output, 2, low, 1, high, 1, width, clip);
}
278 
/* Vertical inverse transform: same kernel as the horizontal pass but
 * with caller-supplied strides so it walks columns; no clipping. */
static void vert_filter(int16_t *output, ptrdiff_t out_stride,
                        int16_t *low, ptrdiff_t low_stride,
                        int16_t *high, ptrdiff_t high_stride, int len)
{
    filter(output, out_stride, low, low_stride, high, high_stride, len, 0);
}
285 
287 {
288  int i, j;
289 
290  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
291  av_freep(&s->plane[i].idwt_buf);
292  av_freep(&s->plane[i].idwt_tmp);
293 
294  for (j = 0; j < 9; j++)
295  s->plane[i].subband[j] = NULL;
296 
297  for (j = 0; j < 8; j++)
298  s->plane[i].l_h[j] = NULL;
299  }
300  s->a_height = 0;
301  s->a_width = 0;
302 }
303 
304 static int alloc_buffers(AVCodecContext *avctx)
305 {
306  CFHDContext *s = avctx->priv_data;
307  int i, j, ret, planes;
308  int chroma_x_shift, chroma_y_shift;
309  unsigned k;
310 
312  s->coded_width *= 2;
313  s->coded_height *= 2;
314  }
315 
316  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
317  return ret;
318  avctx->pix_fmt = s->coded_format;
319 
321  &chroma_x_shift,
322  &chroma_y_shift)) < 0)
323  return ret;
326  planes = 4;
327  chroma_x_shift = 1;
328  chroma_y_shift = 1;
329  }
330 
331  for (i = 0; i < planes; i++) {
332  int w8, h8, w4, h4, w2, h2;
333  int width = i ? avctx->width >> chroma_x_shift : avctx->width;
334  int height = i ? avctx->height >> chroma_y_shift : avctx->height;
335  ptrdiff_t stride = FFALIGN(width / 8, 8) * 8;
336  if (chroma_y_shift)
337  height = FFALIGN(height / 8, 2) * 8;
338  s->plane[i].width = width;
339  s->plane[i].height = height;
340  s->plane[i].stride = stride;
341 
342  w8 = FFALIGN(s->plane[i].width / 8, 8);
343  h8 = height / 8;
344  w4 = w8 * 2;
345  h4 = h8 * 2;
346  w2 = w4 * 2;
347  h2 = h4 * 2;
348 
349  s->plane[i].idwt_buf =
350  av_mallocz_array(height * stride, sizeof(*s->plane[i].idwt_buf));
351  s->plane[i].idwt_tmp =
352  av_malloc_array(height * stride, sizeof(*s->plane[i].idwt_tmp));
353  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
354  return AVERROR(ENOMEM);
355 
356  s->plane[i].subband[0] = s->plane[i].idwt_buf;
357  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
358  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
359  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
360  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
361  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
362  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
363  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
364  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
365  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
366 
367  for (j = 0; j < DWT_LEVELS; j++) {
368  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
369  s->plane[i].band[j][k].a_width = w8 << j;
370  s->plane[i].band[j][k].a_height = h8 << j;
371  }
372  }
373 
374  /* ll2 and ll1 commented out because they are done in-place */
375  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
376  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
377  // s->plane[i].l_h[2] = ll2;
378  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
379  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
380  // s->plane[i].l_h[5] = ll1;
381  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
382  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
383  }
384 
385  s->a_height = s->coded_height;
386  s->a_width = s->coded_width;
387  s->a_format = s->coded_format;
388 
389  return 0;
390 }
391 
392 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
393  AVPacket *avpkt)
394 {
395  CFHDContext *s = avctx->priv_data;
396  GetByteContext gb;
397  ThreadFrame frame = { .f = data };
398  AVFrame *pic = data;
399  int ret = 0, i, j, planes, plane, got_buffer = 0;
400  int16_t *coeff_data;
401 
405 
406  bytestream2_init(&gb, avpkt->data, avpkt->size);
407 
408  while (bytestream2_get_bytes_left(&gb) > 4) {
409  /* Bit weird but implement the tag parsing as the spec says */
410  uint16_t tagu = bytestream2_get_be16(&gb);
411  int16_t tag = (int16_t)tagu;
412  int8_t tag8 = (int8_t)(tagu >> 8);
413  uint16_t abstag = abs(tag);
414  int8_t abs_tag8 = abs(tag8);
415  uint16_t data = bytestream2_get_be16(&gb);
416  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
417  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
418  } else if (tag == SampleFlags) {
419  av_log(avctx, AV_LOG_DEBUG, "Progressive?%"PRIu16"\n", data);
420  s->progressive = data & 0x0001;
421  } else if (tag == ImageWidth) {
422  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
423  s->coded_width = data;
424  } else if (tag == ImageHeight) {
425  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
426  s->coded_height = data;
427  } else if (tag == 101) {
428  av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
429  if (data < 1 || data > 31) {
430  av_log(avctx, AV_LOG_ERROR, "Bits per component %d is invalid\n", data);
431  ret = AVERROR(EINVAL);
432  break;
433  }
434  s->bpc = data;
435  } else if (tag == ChannelCount) {
436  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
437  s->channel_cnt = data;
438  if (data > 4) {
439  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
440  ret = AVERROR_PATCHWELCOME;
441  break;
442  }
443  } else if (tag == SubbandCount) {
444  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
445  if (data != SUBBAND_COUNT) {
446  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
447  ret = AVERROR_PATCHWELCOME;
448  break;
449  }
450  } else if (tag == ChannelNumber) {
451  s->channel_num = data;
452  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
453  if (s->channel_num >= planes) {
454  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
455  ret = AVERROR(EINVAL);
456  break;
457  }
459  } else if (tag == SubbandNumber) {
460  if (s->subband_num != 0 && data == 1) // hack
461  s->level++;
462  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
463  s->subband_num = data;
464  if (s->level >= DWT_LEVELS) {
465  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
466  ret = AVERROR(EINVAL);
467  break;
468  }
469  if (s->subband_num > 3) {
470  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
471  ret = AVERROR(EINVAL);
472  break;
473  }
474  } else if (tag == 51) {
475  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
477  if (s->subband_num_actual >= 10) {
478  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
479  ret = AVERROR(EINVAL);
480  break;
481  }
482  } else if (tag == LowpassPrecision)
483  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
484  else if (tag == Quantization) {
485  s->quantisation = data;
486  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
487  } else if (tag == PrescaleShift) {
488  s->prescale_shift[0] = (data >> 0) & 0x7;
489  s->prescale_shift[1] = (data >> 3) & 0x7;
490  s->prescale_shift[2] = (data >> 6) & 0x7;
491  av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
492  } else if (tag == LowpassWidth) {
493  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
494  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
495  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
496  ret = AVERROR(EINVAL);
497  break;
498  }
499  s->plane[s->channel_num].band[0][0].width = data;
500  s->plane[s->channel_num].band[0][0].stride = data;
501  } else if (tag == LowpassHeight) {
502  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
503  if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
504  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
505  ret = AVERROR(EINVAL);
506  break;
507  }
508  s->plane[s->channel_num].band[0][0].height = data;
509  } else if (tag == SampleType)
510  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
511  else if (tag == TransformType) {
512  if (data != 0) {
513  avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
514  ret = AVERROR_PATCHWELCOME;
515  break;
516  }
517  av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
518  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
519  if (abstag == 0x4001)
520  s->peak.level = 0;
521  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
522  bytestream2_skipu(&gb, data * 4);
523  } else if (tag == 23) {
524  av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
525  avpriv_report_missing_feature(avctx, "Skip frame");
526  ret = AVERROR_PATCHWELCOME;
527  break;
528  } else if (tag == SampleIndexTable) {
529  av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
530  if (data > bytestream2_get_bytes_left(&gb) / 4) {
531  av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
532  ret = AVERROR_INVALIDDATA;
533  break;
534  }
535  for (i = 0; i < data; i++) {
536  uint16_t tag2 = bytestream2_get_be16(&gb);
537  uint16_t val2 = bytestream2_get_be16(&gb);
538  av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
539  }
540  } else if (tag == HighpassWidth) {
541  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
542  if (data < 3) {
543  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
544  ret = AVERROR(EINVAL);
545  break;
546  }
547  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
548  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
549  } else if (tag == HighpassHeight) {
550  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
551  if (data < 3) {
552  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
553  ret = AVERROR(EINVAL);
554  break;
555  }
556  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
557  } else if (tag == 49) {
558  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
559  if (data < 3) {
560  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
561  ret = AVERROR(EINVAL);
562  break;
563  }
564  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
565  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
566  } else if (tag == 50) {
567  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
568  if (data < 3) {
569  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
570  ret = AVERROR(EINVAL);
571  break;
572  }
573  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
574  } else if (tag == 71) {
575  s->codebook = data;
576  av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
577  } else if (tag == 72) {
578  s->codebook = data & 0xf;
579  s->difference_coding = (data >> 4) & 1;
580  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
581  } else if (tag == 70) {
582  av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
583  if (!(data == 10 || data == 12)) {
584  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
585  ret = AVERROR(EINVAL);
586  break;
587  }
588  s->bpc = data;
589  } else if (tag == EncodedFormat) {
590  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
591  if (data == 1) {
593  } else if (data == 2) {
595  } else if (data == 3) {
597  } else if (data == 4) {
599  } else {
600  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
601  ret = AVERROR_PATCHWELCOME;
602  break;
603  }
604  planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
605  } else if (tag == -85) {
606  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
607  s->cropped_height = data;
608  } else if (tag == -75) {
609  s->peak.offset &= ~0xffff;
610  s->peak.offset |= (data & 0xffff);
611  s->peak.base = gb;
612  s->peak.level = 0;
613  } else if (tag == -76) {
614  s->peak.offset &= 0xffff;
615  s->peak.offset |= (data & 0xffffU)<<16;
616  s->peak.base = gb;
617  s->peak.level = 0;
618  } else if (tag == -74 && s->peak.offset) {
619  s->peak.level = data;
620  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
621  } else
622  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
623 
624  /* Some kind of end of header tag */
625  if (tag == BitstreamMarker && data == 0x1a4a && s->coded_width && s->coded_height &&
627  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
628  s->a_format != s->coded_format) {
629  free_buffers(s);
630  if ((ret = alloc_buffers(avctx)) < 0) {
631  free_buffers(s);
632  return ret;
633  }
634  }
635  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
636  if (ret < 0)
637  return ret;
638  if (s->cropped_height) {
639  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
640  if (avctx->height < height)
641  return AVERROR_INVALIDDATA;
642  avctx->height = height;
643  }
644  frame.f->width =
645  frame.f->height = 0;
646 
647  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
648  return ret;
649 
650  s->coded_width = 0;
651  s->coded_height = 0;
653  got_buffer = 1;
654  }
655  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
656 
657  /* Lowpass coefficients */
658  if (tag == BitstreamMarker && data == 0xf0f && s->a_width && s->a_height) {
659  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
660  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
661  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
662  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
663 
664  if (!got_buffer) {
665  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
666  ret = AVERROR(EINVAL);
667  goto end;
668  }
669 
670  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
671  lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
672  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
673  ret = AVERROR(EINVAL);
674  goto end;
675  }
676 
677  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
678  for (i = 0; i < lowpass_height; i++) {
679  for (j = 0; j < lowpass_width; j++)
680  coeff_data[j] = bytestream2_get_be16u(&gb);
681 
682  coeff_data += lowpass_width;
683  }
684 
685  /* Align to mod-4 position to continue reading tags */
686  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
687 
688  /* Copy last line of coefficients if odd height */
689  if (lowpass_height & 1) {
690  memcpy(&coeff_data[lowpass_height * lowpass_width],
691  &coeff_data[(lowpass_height - 1) * lowpass_width],
692  lowpass_width * sizeof(*coeff_data));
693  }
694 
695  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
696  }
697 
698  if (tag == BandHeader && s->subband_num_actual != 255 && s->a_width && s->a_height) {
699  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
700  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
701  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
702  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
703  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
704  int expected;
705  int a_expected = highpass_a_height * highpass_a_width;
706  int level, run, coeff;
707  int count = 0, bytes;
708 
709  if (!got_buffer) {
710  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
711  ret = AVERROR(EINVAL);
712  goto end;
713  }
714 
715  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
716  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
717  ret = AVERROR(EINVAL);
718  goto end;
719  }
720  expected = highpass_height * highpass_stride;
721 
722  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
723 
725  {
726  OPEN_READER(re, &s->gb);
727  if (!s->codebook) {
728  while (1) {
729  UPDATE_CACHE(re, &s->gb);
730  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
731  VLC_BITS, 3, 1);
732 
733  /* escape */
734  if (level == 64)
735  break;
736 
737  count += run;
738 
739  if (count > expected)
740  break;
741 
742  coeff = dequant_and_decompand(level, s->quantisation, 0);
743  for (i = 0; i < run; i++)
744  *coeff_data++ = coeff;
745  }
746  } else {
747  while (1) {
748  UPDATE_CACHE(re, &s->gb);
749  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
750  VLC_BITS, 3, 1);
751 
752  /* escape */
753  if (level == 255 && run == 2)
754  break;
755 
756  count += run;
757 
758  if (count > expected)
759  break;
760 
761  coeff = dequant_and_decompand(level, s->quantisation, s->codebook);
762  for (i = 0; i < run; i++)
763  *coeff_data++ = coeff;
764  }
765  }
766  CLOSE_READER(re, &s->gb);
767  }
768 
769  if (count > expected) {
770  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
771  ret = AVERROR(EINVAL);
772  goto end;
773  }
774  if (s->peak.level)
775  peak_table(coeff_data - count, &s->peak, count);
776  if (s->difference_coding)
777  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
778 
779  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
780  if (bytes > bytestream2_get_bytes_left(&gb)) {
781  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
782  ret = AVERROR(EINVAL);
783  goto end;
784  } else
785  bytestream2_seek(&gb, bytes, SEEK_CUR);
786 
787  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
788  s->codebook = 0;
789 
790  /* Copy last line of coefficients if odd height */
791  if (highpass_height & 1) {
792  memcpy(&coeff_data[highpass_height * highpass_stride],
793  &coeff_data[(highpass_height - 1) * highpass_stride],
794  highpass_stride * sizeof(*coeff_data));
795  }
796  }
797  }
798 
799  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
801  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
802  ret = AVERROR(EINVAL);
803  goto end;
804  }
805 
806  if (!got_buffer) {
807  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
808  ret = AVERROR(EINVAL);
809  goto end;
810  }
811 
812  planes = av_pix_fmt_count_planes(avctx->pix_fmt);
813  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
814  if (!s->progressive)
815  return AVERROR_INVALIDDATA;
816  planes = 4;
817  }
818 
819  for (plane = 0; plane < planes && !ret; plane++) {
820  /* level 1 */
821  int lowpass_height = s->plane[plane].band[0][0].height;
822  int lowpass_width = s->plane[plane].band[0][0].width;
823  int highpass_stride = s->plane[plane].band[0][1].stride;
824  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
825  ptrdiff_t dst_linesize;
826  int16_t *low, *high, *output, *dst;
827 
828  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
829  act_plane = 0;
830  dst_linesize = pic->linesize[act_plane];
831  } else {
832  dst_linesize = pic->linesize[act_plane] / 2;
833  }
834 
835  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
836  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
837  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
838  ret = AVERROR(EINVAL);
839  goto end;
840  }
841 
842  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
843 
844  low = s->plane[plane].subband[0];
845  high = s->plane[plane].subband[2];
846  output = s->plane[plane].l_h[0];
847  for (i = 0; i < lowpass_width; i++) {
848  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
849  low++;
850  high++;
851  output++;
852  }
853 
854  low = s->plane[plane].subband[1];
855  high = s->plane[plane].subband[3];
856  output = s->plane[plane].l_h[1];
857 
858  for (i = 0; i < lowpass_width; i++) {
859  // note the stride of "low" is highpass_stride
860  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
861  low++;
862  high++;
863  output++;
864  }
865 
866  low = s->plane[plane].l_h[0];
867  high = s->plane[plane].l_h[1];
868  output = s->plane[plane].subband[0];
869  for (i = 0; i < lowpass_height * 2; i++) {
870  horiz_filter(output, low, high, lowpass_width);
871  low += lowpass_width;
872  high += lowpass_width;
873  output += lowpass_width * 2;
874  }
875  if (s->bpc == 12) {
876  output = s->plane[plane].subband[0];
877  for (i = 0; i < lowpass_height * 2; i++) {
878  for (j = 0; j < lowpass_width * 2; j++)
879  output[j] *= 4;
880 
881  output += lowpass_width * 2;
882  }
883  }
884 
885  /* level 2 */
886  lowpass_height = s->plane[plane].band[1][1].height;
887  lowpass_width = s->plane[plane].band[1][1].width;
888  highpass_stride = s->plane[plane].band[1][1].stride;
889 
890  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
891  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
892  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
893  ret = AVERROR(EINVAL);
894  goto end;
895  }
896 
897  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
898 
899  low = s->plane[plane].subband[0];
900  high = s->plane[plane].subband[5];
901  output = s->plane[plane].l_h[3];
902  for (i = 0; i < lowpass_width; i++) {
903  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
904  low++;
905  high++;
906  output++;
907  }
908 
909  low = s->plane[plane].subband[4];
910  high = s->plane[plane].subband[6];
911  output = s->plane[plane].l_h[4];
912  for (i = 0; i < lowpass_width; i++) {
913  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
914  low++;
915  high++;
916  output++;
917  }
918 
919  low = s->plane[plane].l_h[3];
920  high = s->plane[plane].l_h[4];
921  output = s->plane[plane].subband[0];
922  for (i = 0; i < lowpass_height * 2; i++) {
923  horiz_filter(output, low, high, lowpass_width);
924  low += lowpass_width;
925  high += lowpass_width;
926  output += lowpass_width * 2;
927  }
928 
929  output = s->plane[plane].subband[0];
930  for (i = 0; i < lowpass_height * 2; i++) {
931  for (j = 0; j < lowpass_width * 2; j++)
932  output[j] *= 4;
933 
934  output += lowpass_width * 2;
935  }
936 
937  /* level 3 */
938  lowpass_height = s->plane[plane].band[2][1].height;
939  lowpass_width = s->plane[plane].band[2][1].width;
940  highpass_stride = s->plane[plane].band[2][1].stride;
941 
942  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
943  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
944  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
945  ret = AVERROR(EINVAL);
946  goto end;
947  }
948 
949  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
950  if (s->progressive) {
951  low = s->plane[plane].subband[0];
952  high = s->plane[plane].subband[8];
953  output = s->plane[plane].l_h[6];
954  for (i = 0; i < lowpass_width; i++) {
955  vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
956  low++;
957  high++;
958  output++;
959  }
960 
961  low = s->plane[plane].subband[7];
962  high = s->plane[plane].subband[9];
963  output = s->plane[plane].l_h[7];
964  for (i = 0; i < lowpass_width; i++) {
965  vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
966  low++;
967  high++;
968  output++;
969  }
970 
971  dst = (int16_t *)pic->data[act_plane];
972  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
973  if (plane & 1)
974  dst++;
975  if (plane > 1)
976  dst += pic->linesize[act_plane] >> 1;
977  }
978  low = s->plane[plane].l_h[6];
979  high = s->plane[plane].l_h[7];
980 
981  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
982  (lowpass_height * 2 > avctx->coded_height / 2 ||
983  lowpass_width * 2 > avctx->coded_width / 2 )
984  ) {
985  ret = AVERROR_INVALIDDATA;
986  goto end;
987  }
988 
989  for (i = 0; i < lowpass_height * 2; i++) {
990  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
991  horiz_filter_clip_bayer(dst, low, high, lowpass_width, s->bpc);
992  else
993  horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
994  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
995  process_alpha(dst, lowpass_width * 2);
996  low += lowpass_width;
997  high += lowpass_width;
998  dst += dst_linesize;
999  }
1000  } else {
1001  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
1002  pic->interlaced_frame = 1;
1003  low = s->plane[plane].subband[0];
1004  high = s->plane[plane].subband[7];
1005  output = s->plane[plane].l_h[6];
1006  for (i = 0; i < lowpass_height; i++) {
1007  horiz_filter(output, low, high, lowpass_width);
1008  low += lowpass_width;
1009  high += lowpass_width;
1010  output += lowpass_width * 2;
1011  }
1012 
1013  low = s->plane[plane].subband[8];
1014  high = s->plane[plane].subband[9];
1015  output = s->plane[plane].l_h[7];
1016  for (i = 0; i < lowpass_height; i++) {
1017  horiz_filter(output, low, high, lowpass_width);
1018  low += lowpass_width;
1019  high += lowpass_width;
1020  output += lowpass_width * 2;
1021  }
1022 
1023  dst = (int16_t *)pic->data[act_plane];
1024  low = s->plane[plane].l_h[6];
1025  high = s->plane[plane].l_h[7];
1026  for (i = 0; i < lowpass_height; i++) {
1027  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1028  low += lowpass_width * 2;
1029  high += lowpass_width * 2;
1030  dst += pic->linesize[act_plane];
1031  }
1032  }
1033  }
1034 
1035 
1036  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1037  process_bayer(pic);
1038 end:
1039  if (ret < 0)
1040  return ret;
1041 
1042  *got_frame = 1;
1043  return avpkt->size;
1044 }
1045 
1047 {
1048  CFHDContext *s = avctx->priv_data;
1049 
1050  free_buffers(s);
1051 
1052  ff_free_vlc(&s->vlc_9);
1053  ff_free_vlc(&s->vlc_18);
1054 
1055  return 0;
1056 }
1057 
1059  .name = "cfhd",
1060  .long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
1061  .type = AVMEDIA_TYPE_VIDEO,
1062  .id = AV_CODEC_ID_CFHD,
1063  .priv_data_size = sizeof(CFHDContext),
1064  .init = cfhd_init,
1065  .close = cfhd_close,
1066  .decode = cfhd_decode,
1067  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1069 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:48
int channel_cnt
Definition: cfhd.h:100
#define NULL
Definition: coverity.c:32
int difference_coding
Definition: cfhd.h:109
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:392
VLC vlc_18
Definition: cfhd.h:85
CFHDParam
Definition: cfhd.c:43
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVCodecContext * avctx
Definition: cfhd.h:79
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
TransformType
Definition: webp.c:110
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:141
float re
Definition: fft.c:82
misc image utilities
AVFrame * f
Definition: thread.h:35
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:105
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:41
int a_height
Definition: cfhd.h:50
int level
Definition: cfhd.h:73
int size
Definition: packet.h:356
#define VLC_BITS
Definition: asvdec.c:37
int cropped_height
Definition: cfhd.h:91
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
int16_t * idwt_tmp
Definition: cfhd.h:63
int a_width
Definition: cfhd.h:95
ptrdiff_t stride
Definition: cfhd.h:47
uint8_t run
Definition: svq3.c:208
int subband_num_actual
Definition: cfhd.h:112
static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high, int width, int clip)
Definition: cfhd.c:267
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:249
Macro definitions for various function/variable attributes.
int width
Definition: cfhd.h:49
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:206
static void horiz_filter(int16_t *output, int16_t *low, int16_t *high, int width)
Definition: cfhd.c:261
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
Definition: internal.h:40
#define av_cold
Definition: attributes.h:88
AVOptions.
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Multithreading support functions.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int16_t * idwt_buf
Definition: cfhd.h:62
int a_format
Definition: cfhd.h:97
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:304
#define height
uint8_t * data
Definition: packet.h:355
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1532
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high, int width, int clip)
Definition: cfhd.c:273
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
#define FFALIGN(x, a)
Definition: macros.h:48
#define SUBBAND_COUNT
Definition: cfhd.h:34
#define av_log(a,...)
static int dequant_and_decompand(int level, int quantisation, int codebook)
Definition: cfhd.c:116
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
CFHD_RL_VLC_ELEM table_18_rl_vlc[4572]
Definition: cfhd.h:84
CFHD_RL_VLC_ELEM table_9_rl_vlc[2088]
Definition: cfhd.h:81
#define U(x)
Definition: vp56_arith.h:37
uint8_t prescale_shift[3]
Definition: cfhd.h:114
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define R
Definition: huffyuvdsp.h:34
VLC vlc_9
Definition: cfhd.h:82
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
int16_t * l_h[8]
Definition: cfhd.h:67
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
#define DWT_LEVELS
Definition: cfhd.h:42
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
#define GET_RL_VLC(level, run, name, gb, table, bits,max_depth, need_update)
Definition: get_bits.h:738
Definition: cfhd.h:72
int a_width
Definition: cfhd.h:48
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1046
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:95
int16_t SampleType
Definition: ac3enc.h:70
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
SubBand band[DWT_LEVELS][4]
Definition: cfhd.h:69
#define b
Definition: input.c:41
GetByteContext base
Definition: cfhd.h:75
int subband_cnt
Definition: cfhd.h:101
#define width
#define FFSIGN(a)
Definition: common.h:73
int width
picture width / height.
Definition: avcodec.h:699
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:81
uint16_t quantisation
Definition: cfhd.h:104
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:71
#define FF_ARRAY_ELEMS(a)
static void vert_filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len)
Definition: cfhd.c:279
static const struct @316 planes[]
int channel_num
Definition: cfhd.h:102
if(ret)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
GetBitContext gb
Definition: cfhd.h:87
int wavelet_depth
Definition: cfhd.h:105
Libavcodec external API header.
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
#define abs(x)
Definition: cuda_runtime.h:35
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:526
int codebook
Definition: cfhd.h:108
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:129
int subband_num
Definition: cfhd.h:110
int coded_height
Definition: avcodec.h:714
int pshift
Definition: cfhd.h:106
enum AVPixelFormat coded_format
Definition: cfhd.h:92
AVCodec ff_cfhd_decoder
Definition: cfhd.c:1058
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
refcounted data buffer API
Peak peak
Definition: cfhd.h:116
int level
Definition: cfhd.h:111
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
static void process_bayer(AVFrame *frame)
Definition: cfhd.c:163
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
uint8_t level
Definition: svq3.c:209
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:424
int coded_width
Definition: cfhd.h:89
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
common internal and external API header
static double clip(void *opaque, double val)
Clip value val in the minval - maxval range.
Definition: vf_lut.c:162
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:149
ptrdiff_t stride
Definition: cfhd.h:60
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
int bpc
Definition: cfhd.h:99
void * priv_data
Definition: avcodec.h:553
int len
int a_height
Definition: cfhd.h:96
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:88
int height
Definition: cfhd.h:51
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
int width
Definition: cfhd.h:58
int offset
Definition: cfhd.h:74
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:286
int height
Definition: frame.h:366
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:276
int progressive
Definition: cfhd.h:93
#define av_freep(p)
#define av_malloc_array(a, b)
#define stride
Plane plane[4]
Definition: cfhd.h:115
int height
Definition: cfhd.h:59
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t lowpass_precision
Definition: cfhd.h:103
int16_t * subband[SUBBAND_COUNT]
Definition: cfhd.h:66
This structure stores compressed data.
Definition: packet.h:332
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:361
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
int i
Definition: input.c:406
int coded_height
Definition: cfhd.h:90
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
Tag MUST be even
Definition: snow.txt:206
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:229
static uint8_t tmp[11]
Definition: aes_ctr.c:26