FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 static av_cold int cfhd_init(AVCodecContext *avctx)
44 {
45  CFHDContext *s = avctx->priv_data;
46 
47  s->avctx = avctx;
48 
49  for (int i = 0; i < 64; i++) {
50  int val = i;
51 
52  if (val >= 40) {
53  if (val >= 54) {
54  val -= 54;
55  val <<= 2;
56  val += 54;
57  }
58 
59  val -= 40;
60  val <<= 2;
61  val += 40;
62  }
63 
64  s->lut[0][i] = val;
65  }
66 
67  for (int i = 0; i < 256; i++)
68  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
69 
70  return ff_cfhd_init_vlcs(s);
71 }
72 
{
    /* NOTE(review): function signature lost in extraction — presumably
     * static void init_plane_defaults(CFHDContext *s); verify upstream.
     * Resets the per-channel subband parsing state. */
    s->subband_num = 0;
    s->level = 0;
    s->subband_num_actual = 0;
}
79 
{
    /* NOTE(review): function signature lost in extraction — presumably
     * static void init_peak_table(CFHDContext *s); verify upstream.
     * Clears the peak-table state: level, offset and the saved reader. */
    s->peak.level = 0;
    s->peak.offset = 0;
    memset(&s->peak.base, 0, sizeof(s->peak.base));
}
86 
{
    /* NOTE(review): function signature lost in extraction — presumably
     * static void init_frame_defaults(CFHDContext *s). Several interior
     * lines (doxygen 91, 95, 103-104) are also missing from this capture,
     * so the assignments below may not be the complete set of defaults. */
    s->coded_width = 0;
    s->coded_height = 0;
    s->cropped_height = 0;
    /* Header-overridable defaults: 10-bit samples, 3 channels,
     * 16-bit lowpass coefficients, unity quantisation. */
    s->bpc = 10;
    s->channel_cnt = 3;
    s->channel_num = 0;
    s->lowpass_precision = 16;
    s->quantisation = 1;
    s->codebook = 0;
    s->difference_coding = 0;
    s->frame_type = 0;
    s->sample_type = 0;
}
106 
107 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
108 {
109  if (codebook == 0 || codebook == 1) {
110  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
111  } else
112  return level * quantisation;
113 }
114 
static inline void difference_coding(int16_t *band, int width, int height)
{
    /* Undo horizontal delta coding: turn each row into its prefix sums. */
    for (int row = 0; row < height; row++) {
        int16_t *line = band + row * width;

        for (int col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
126 
127 static inline void peak_table(int16_t *band, Peak *peak, int length)
128 {
129  int i;
130  for (i = 0; i < length; i++)
131  if (abs(band[i]) > peak->level)
132  band[i] = bytestream2_get_le16(&peak->base);
133 }
134 
135 static inline void process_alpha(int16_t *alpha, int width)
136 {
137  int i, channel;
138  for (i = 0; i < width; i++) {
139  channel = alpha[i];
140  channel -= ALPHA_COMPAND_DC_OFFSET;
141  channel <<= 3;
142  channel *= ALPHA_COMPAND_GAIN;
143  channel >>= 16;
144  channel = av_clip_uintp2(channel, 12);
145  alpha[i] = channel;
146  }
147 }
148 
/* Rewrite a decoded Bayer frame in place: per 2x2 cell the stored
 * components are a green value plus mid-biased red/blue/green-delta
 * differences, converted back to R, G1, G2, B and scaled up to 16 bits.
 * NOTE(review): component naming inferred from the arithmetic below —
 * confirm against the CFHD Bayer layout documentation. */
static inline void process_bayer(AVFrame *frame, int bpc)
{
    const int linesize = frame->linesize[0];
    /* Four pointers into the 2x2 Bayer cell: top-left, top-right,
     * bottom-left, bottom-right (the "+ 2" offsets are in bytes,
     * i.e. one 16-bit sample). */
    uint16_t *r = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 1 << (bpc - 1);     /* bias removed from the differences */
    const int factor = 1 << (16 - bpc); /* scales bpc-bit values to 16 bits */

    /* One iteration per 2x2 cell: half the rows, stepping columns by 2. */
    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            g = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            /* Reconstruct the four Bayer samples from green + differences. */
            R = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B = (bg - mid) * 2 + g;

            R = av_clip_uintp2(R * factor, 16);
            G1 = av_clip_uintp2(G1 * factor, 16);
            G2 = av_clip_uintp2(G2 * factor, 16);
            B = av_clip_uintp2(B * factor, 16);

            r[x] = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x] = B;
        }

        /* linesize is in bytes, but these are uint16_t pointers, so each
         * advance moves 2*linesize bytes = two picture rows (one cell row). */
        r += linesize;
        g1 += linesize;
        g2 += linesize;
        b += linesize;
    }
}
192 
/* Inverse 2-tap vertical transform for interlaced content: reconstruct an
 * even and an odd field line from one low-band and one high-band line,
 * clipping both to 10 bits. 'plane' is unused but kept for the callers. */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    for (int x = 0; x < width; x++) {
        int16_t top = (low[x] - high[x]) / 2;
        int16_t bottom = (low[x] + high[x]) / 2;

        output[x] = av_clip_uintp2(top, 10);
        output[x + linesize] = av_clip_uintp2(bottom, 10);
    }
}
205 
/* In-place inverse 2-tap temporal transform: the low band becomes the
 * first (even) sample set and the high band the second (odd) one. */
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    for (int i = 0; i < width; i++) {
        int diff = low[i] - high[i];
        int sum = low[i] + high[i];

        low[i] = diff / 2;
        high[i] = sum / 2;
    }
}
216 
{
    /* NOTE(review): function signature lost in extraction — presumably
     * static void free_buffers(CFHDContext *s); verify upstream.
     * Releases all per-plane IDWT buffers and clears derived pointers. */
    int i, j;

    for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
        av_freep(&s->plane[i].idwt_buf);
        av_freep(&s->plane[i].idwt_tmp);
        s->plane[i].idwt_size = 0;

        /* The subband and l_h pointers alias idwt_buf/idwt_tmp, so they
         * must be NULLed after the underlying blocks are freed. */
        for (j = 0; j < SUBBAND_COUNT_3D; j++)
            s->plane[i].subband[j] = NULL;

        for (j = 0; j < 10; j++)
            s->plane[i].l_h[j] = NULL;
    }
    /* Zeroed allocated dimensions force reallocation on the next frame. */
    s->a_height = 0;
    s->a_width = 0;
}
235 
/* Allocate the per-plane IDWT coefficient and scratch buffers for the
 * current coded dimensions/format, and carve the subband and l_h pointers
 * out of those blocks. Returns 0 on success or a negative AVERROR. */
static int alloc_buffers(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;
    int i, j, ret, planes, bayer = 0;
    int chroma_x_shift, chroma_y_shift;
    unsigned k;

    if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
        return ret;
    avctx->pix_fmt = s->coded_format;

    /* NOTE(review): source lines lost in extraction here — presumably the
     * plane count and the chroma-subsample query call whose argument list
     * continues below. Verify against upstream cfhd.c. */

        &chroma_x_shift,
        &chroma_y_shift)) < 0)
        return ret;
    /* NOTE(review): lines lost in extraction — presumably a Bayer-format
     * branch that opens the block closed by the brace below. */
        planes = 4;
        chroma_x_shift = 1;
        chroma_y_shift = 1;
        bayer = 1;
    }

    for (i = 0; i < planes; i++) {
        int w8, h8, w4, h4, w2, h2;
        /* Chroma planes (and all Bayer planes) use subsampled dimensions. */
        int width = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
        int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
        /* Stride padded so every DWT level stays aligned. */
        ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;

        if (chroma_y_shift && !bayer)
            height = FFALIGN(height / 8, 2) * 8;
        s->plane[i].width = width;
        s->plane[i].height = height;
        s->plane[i].stride = stride;

        /* Padded subband dimensions per level: w8/h8 (smallest) through
         * w2/h2, each level doubling both dimensions. */
        w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
        h8 = FFALIGN(height, 8) / 8;
        w4 = w8 * 2;
        h4 = h8 * 2;
        w2 = w4 * 2;
        h2 = h4 * 2;

        if (s->transform_type == 0) {
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
            s->plane[i].idwt_buf =
                av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        } else {
            /* Non-zero transform type (3D/temporal): room for two frames. */
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
            s->plane[i].idwt_buf =
                av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        }

        if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
            return AVERROR(ENOMEM);

        /* Carve the subband pointers out of the single idwt_buf block. */
        s->plane[i].subband[0] = s->plane[i].idwt_buf;
        s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
        s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
        s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
        s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
        s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
        s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
        if (s->transform_type == 0) {
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
        } else {
            /* 3D transform: subbands 7-13 live in the second frame half. */
            int16_t *frame2 =
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
            s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
            s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
            s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
            s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
            s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
            s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
            s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
        }

        /* Record the allocated (padded) band dimensions used later for
         * bounds checks against header-declared sizes. */
        if (s->transform_type == 0) {
            for (j = 0; j < DWT_LEVELS; j++) {
                for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << j;
                    s->plane[i].band[j][k].a_height = h8 << j;
                }
            }
        } else {
            for (j = 0; j < DWT_LEVELS_3D; j++) {
                /* 3D levels map to spatial scales 0,1,1,2,... */
                int t = j < 1 ? 0 : (j < 3 ? 1 : 2);

                for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << t;
                    s->plane[i].band[j][k].a_height = h8 << t;
                }
            }
        }

        /* ll2 and ll1 commented out because they are done in-place */
        s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
        // s->plane[i].l_h[2] = ll2;
        s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
        // s->plane[i].l_h[5] = ll1;
        s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
        if (s->transform_type != 0) {
            int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;

            s->plane[i].l_h[8] = frame2;
            s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
        }
    }

    /* Remember what was allocated so the decode loop can detect when a
     * reallocation is needed. */
    s->a_height = s->coded_height;
    s->a_width = s->coded_width;
    s->a_format = s->coded_format;

    return 0;
}
363 
364 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
365  AVPacket *avpkt)
366 {
367  CFHDContext *s = avctx->priv_data;
368  CFHDDSPContext *dsp = &s->dsp;
369  GetByteContext gb;
370  ThreadFrame frame = { .f = data };
371  AVFrame *pic = data;
372  int ret = 0, i, j, plane, got_buffer = 0;
373  int16_t *coeff_data;
374 
377 
378  bytestream2_init(&gb, avpkt->data, avpkt->size);
379 
380  while (bytestream2_get_bytes_left(&gb) >= 4) {
381  /* Bit weird but implement the tag parsing as the spec says */
382  uint16_t tagu = bytestream2_get_be16(&gb);
383  int16_t tag = (int16_t)tagu;
384  int8_t tag8 = (int8_t)(tagu >> 8);
385  uint16_t abstag = abs(tag);
386  int8_t abs_tag8 = abs(tag8);
387  uint16_t data = bytestream2_get_be16(&gb);
388  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
389  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
390  } else if (tag == SampleFlags) {
391  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
392  s->progressive = data & 0x0001;
393  } else if (tag == FrameType) {
394  s->frame_type = data;
395  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
396  } else if (abstag == VersionMajor) {
397  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
398  } else if (abstag == VersionMinor) {
399  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
400  } else if (abstag == VersionRevision) {
401  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
402  } else if (abstag == VersionEdit) {
403  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
404  } else if (abstag == Version) {
405  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
406  } else if (tag == ImageWidth) {
407  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
408  s->coded_width = data;
409  } else if (tag == ImageHeight) {
410  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
411  s->coded_height = data;
412  } else if (tag == ChannelCount) {
413  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
414  s->channel_cnt = data;
415  if (data > 4) {
416  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
417  ret = AVERROR_PATCHWELCOME;
418  break;
419  }
420  } else if (tag == SubbandCount) {
421  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
422  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
423  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
424  ret = AVERROR_PATCHWELCOME;
425  break;
426  }
427  } else if (tag == ChannelNumber) {
428  s->channel_num = data;
429  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
430  if (s->channel_num >= s->planes) {
431  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
432  ret = AVERROR(EINVAL);
433  break;
434  }
436  } else if (tag == SubbandNumber) {
437  if (s->subband_num != 0 && data == 1) // hack
438  s->level++;
439  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
440  s->subband_num = data;
441  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
442  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
443  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
444  ret = AVERROR(EINVAL);
445  break;
446  }
447  if (s->subband_num > 3) {
448  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
449  ret = AVERROR(EINVAL);
450  break;
451  }
452  } else if (tag == SubbandBand) {
453  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
455  if ((s->transform_type == 0 && s->subband_num_actual >= SUBBAND_COUNT) ||
456  (s->transform_type == 2 && s->subband_num_actual >= SUBBAND_COUNT_3D && s->subband_num_actual != 255)) {
457  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
458  ret = AVERROR(EINVAL);
459  break;
460  }
461  } else if (tag == LowpassPrecision)
462  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
463  else if (tag == Quantization) {
464  s->quantisation = data;
465  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
466  } else if (tag == PrescaleTable) {
467  for (i = 0; i < 8; i++)
468  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
469  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
470  } else if (tag == BandEncoding) {
471  if (!data || data > 5) {
472  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
473  ret = AVERROR(EINVAL);
474  break;
475  }
476  s->band_encoding = data;
477  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
478  } else if (tag == LowpassWidth) {
479  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
480  s->plane[s->channel_num].band[0][0].width = data;
481  s->plane[s->channel_num].band[0][0].stride = data;
482  } else if (tag == LowpassHeight) {
483  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
484  s->plane[s->channel_num].band[0][0].height = data;
485  } else if (tag == SampleType) {
486  s->sample_type = data;
487  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
488  } else if (tag == TransformType) {
489  if (data > 2) {
490  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
491  ret = AVERROR(EINVAL);
492  break;
493  } else if (data == 1) {
494  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
495  ret = AVERROR_PATCHWELCOME;
496  break;
497  }
498  s->transform_type = data;
499  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
500  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
501  if (abstag == 0x4001)
502  s->peak.level = 0;
503  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
504  bytestream2_skipu(&gb, data * 4);
505  } else if (tag == FrameIndex) {
506  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
507  s->frame_index = data;
508  } else if (tag == SampleIndexTable) {
509  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
510  if (data > bytestream2_get_bytes_left(&gb) / 4) {
511  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
512  ret = AVERROR_INVALIDDATA;
513  break;
514  }
515  for (i = 0; i < data; i++) {
516  uint32_t offset = bytestream2_get_be32(&gb);
517  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
518  }
519  } else if (tag == HighpassWidth) {
520  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
521  if (data < 3) {
522  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
523  ret = AVERROR(EINVAL);
524  break;
525  }
526  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
527  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
528  } else if (tag == HighpassHeight) {
529  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
530  if (data < 3) {
531  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
532  ret = AVERROR(EINVAL);
533  break;
534  }
535  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
536  } else if (tag == BandWidth) {
537  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
538  if (data < 3) {
539  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
540  ret = AVERROR(EINVAL);
541  break;
542  }
543  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
544  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
545  } else if (tag == BandHeight) {
546  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
547  if (data < 3) {
548  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
549  ret = AVERROR(EINVAL);
550  break;
551  }
552  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
553  } else if (tag == InputFormat) {
554  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
555  if (s->coded_format == AV_PIX_FMT_NONE ||
557  if (data >= 100 && data <= 105) {
559  } else if (data >= 122 && data <= 128) {
561  } else if (data == 30) {
563  } else {
565  }
567  }
568  } else if (tag == BandCodingFlags) {
569  s->codebook = data & 0xf;
570  s->difference_coding = (data >> 4) & 1;
571  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
572  } else if (tag == Precision) {
573  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
574  if (!(data == 10 || data == 12)) {
575  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
576  ret = AVERROR(EINVAL);
577  break;
578  }
579  avctx->bits_per_raw_sample = s->bpc = data;
580  } else if (tag == EncodedFormat) {
581  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
582  if (data == 1) {
584  } else if (data == 2) {
586  } else if (data == 3) {
588  } else if (data == 4) {
590  } else {
591  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
592  ret = AVERROR_PATCHWELCOME;
593  break;
594  }
595  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
596  } else if (tag == -DisplayHeight) {
597  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
598  s->cropped_height = data;
599  } else if (tag == -PeakOffsetLow) {
600  s->peak.offset &= ~0xffff;
601  s->peak.offset |= (data & 0xffff);
602  s->peak.base = gb;
603  s->peak.level = 0;
604  } else if (tag == -PeakOffsetHigh) {
605  s->peak.offset &= 0xffff;
606  s->peak.offset |= (data & 0xffffU)<<16;
607  s->peak.base = gb;
608  s->peak.level = 0;
609  } else if (tag == -PeakLevel && s->peak.offset) {
610  s->peak.level = data;
611  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
612  } else
613  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
614 
615  if (tag == BitstreamMarker && data == 0xf0f &&
617  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
618  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
619  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
620 
621  if (s->coded_width) {
622  s->coded_width *= factor;
623  }
624 
625  if (s->coded_height) {
626  s->coded_height *= factor;
627  }
628 
629  if (!s->a_width && !s->coded_width) {
630  s->coded_width = lowpass_width * factor * 8;
631  }
632 
633  if (!s->a_height && !s->coded_height) {
634  s->coded_height = lowpass_height * factor * 8;
635  }
636 
637  if (s->a_width && !s->coded_width)
638  s->coded_width = s->a_width;
639  if (s->a_height && !s->coded_height)
640  s->coded_height = s->a_height;
641 
642  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
643  s->a_format != s->coded_format) {
644  free_buffers(s);
645  if ((ret = alloc_buffers(avctx)) < 0) {
646  free_buffers(s);
647  return ret;
648  }
649  }
650  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
651  if (ret < 0)
652  return ret;
653  if (s->cropped_height) {
654  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
655  if (avctx->height < height)
656  return AVERROR_INVALIDDATA;
657  avctx->height = height;
658  }
659  frame.f->width =
660  frame.f->height = 0;
661 
662  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
663  return ret;
664 
665  s->coded_width = 0;
666  s->coded_height = 0;
668  got_buffer = 1;
669  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
670  frame.f->width =
671  frame.f->height = 0;
672 
673  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
674  return ret;
675  s->coded_width = 0;
676  s->coded_height = 0;
678  got_buffer = 1;
679  }
680 
681  if (s->subband_num_actual == 255)
682  goto finish;
683  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
684 
685  /* Lowpass coefficients */
686  if (tag == BitstreamMarker && data == 0xf0f && s->a_width && s->a_height) {
687  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
688  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
689  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
690  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
691 
692  if (lowpass_width < 3 ||
693  lowpass_width > lowpass_a_width) {
694  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
695  ret = AVERROR(EINVAL);
696  goto end;
697  }
698 
699  if (lowpass_height < 3 ||
700  lowpass_height > lowpass_a_height) {
701  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
702  ret = AVERROR(EINVAL);
703  goto end;
704  }
705 
706  if (!got_buffer) {
707  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
708  ret = AVERROR(EINVAL);
709  goto end;
710  }
711 
712  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
713  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
714  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
715  ret = AVERROR(EINVAL);
716  goto end;
717  }
718 
719  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
720  for (i = 0; i < lowpass_height; i++) {
721  for (j = 0; j < lowpass_width; j++)
722  coeff_data[j] = bytestream2_get_be16u(&gb);
723 
724  coeff_data += lowpass_width;
725  }
726 
727  /* Align to mod-4 position to continue reading tags */
728  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
729 
730  /* Copy last line of coefficients if odd height */
731  if (lowpass_height & 1) {
732  memcpy(&coeff_data[lowpass_height * lowpass_width],
733  &coeff_data[(lowpass_height - 1) * lowpass_width],
734  lowpass_width * sizeof(*coeff_data));
735  }
736 
737  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
738  }
739 
740  if ((tag == BandHeader || tag == BandSecondPass) && s->subband_num_actual != 255 && s->a_width && s->a_height) {
741  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
742  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
743  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
744  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
745  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
746  int expected;
747  int a_expected = highpass_a_height * highpass_a_width;
748  int level, run, coeff;
749  int count = 0, bytes;
750 
751  if (!got_buffer) {
752  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
753  ret = AVERROR(EINVAL);
754  goto end;
755  }
756 
757  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
758  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
759  ret = AVERROR(EINVAL);
760  goto end;
761  }
762  expected = highpass_height * highpass_stride;
763 
764  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
765 
766  ret = init_get_bits8(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb));
767  if (ret < 0)
768  goto end;
769  {
770  OPEN_READER(re, &s->gb);
771 
772  const int lossless = s->band_encoding == 5;
773 
774  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
775  s->codebook = 1;
776  if (!s->codebook) {
777  while (1) {
778  UPDATE_CACHE(re, &s->gb);
779  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
780  VLC_BITS, 3, 1);
781 
782  /* escape */
783  if (level == 64)
784  break;
785 
786  count += run;
787 
788  if (count > expected)
789  break;
790 
791  if (!lossless)
792  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
793  else
794  coeff = level;
795  if (tag == BandSecondPass) {
796  const uint16_t q = s->quantisation;
797 
798  for (i = 0; i < run; i++) {
799  *coeff_data |= coeff << 8;
800  *coeff_data++ *= q;
801  }
802  } else {
803  for (i = 0; i < run; i++)
804  *coeff_data++ = coeff;
805  }
806  }
807  } else {
808  while (1) {
809  UPDATE_CACHE(re, &s->gb);
810  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
811  VLC_BITS, 3, 1);
812 
813  /* escape */
814  if (level == 255 && run == 2)
815  break;
816 
817  count += run;
818 
819  if (count > expected)
820  break;
821 
822  if (!lossless)
823  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
824  else
825  coeff = level;
826  if (tag == BandSecondPass) {
827  const uint16_t q = s->quantisation;
828 
829  for (i = 0; i < run; i++) {
830  *coeff_data |= coeff << 8;
831  *coeff_data++ *= q;
832  }
833  } else {
834  for (i = 0; i < run; i++)
835  *coeff_data++ = coeff;
836  }
837  }
838  }
839  CLOSE_READER(re, &s->gb);
840  }
841 
842  if (count > expected) {
843  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
844  ret = AVERROR(EINVAL);
845  goto end;
846  }
847  if (s->peak.level)
848  peak_table(coeff_data - count, &s->peak, count);
849  if (s->difference_coding)
850  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
851 
852  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
853  if (bytes > bytestream2_get_bytes_left(&gb)) {
854  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
855  ret = AVERROR(EINVAL);
856  goto end;
857  } else
858  bytestream2_seek(&gb, bytes, SEEK_CUR);
859 
860  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
861 finish:
862  if (s->subband_num_actual != 255)
863  s->codebook = 0;
864  }
865  }
866 
868  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
869  s->progressive = 1;
870  s->planes = 4;
871  }
872 
873  ff_thread_finish_setup(avctx);
874 
875  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
877  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
878  ret = AVERROR(EINVAL);
879  goto end;
880  }
881 
882  if (!got_buffer) {
883  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
884  ret = AVERROR(EINVAL);
885  goto end;
886  }
887 
888  if (s->transform_type == 0 && s->sample_type != 1) {
889  for (plane = 0; plane < s->planes && !ret; plane++) {
890  /* level 1 */
891  int lowpass_height = s->plane[plane].band[0][0].height;
892  int output_stride = s->plane[plane].band[0][0].a_width;
893  int lowpass_width = s->plane[plane].band[0][0].width;
894  int highpass_stride = s->plane[plane].band[0][1].stride;
895  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
896  ptrdiff_t dst_linesize;
897  int16_t *low, *high, *output, *dst;
898 
899  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
900  act_plane = 0;
901  dst_linesize = pic->linesize[act_plane];
902  } else {
903  dst_linesize = pic->linesize[act_plane] / 2;
904  }
905 
906  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
907  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
908  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
909  ret = AVERROR(EINVAL);
910  goto end;
911  }
912 
913  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
914 
915  low = s->plane[plane].subband[0];
916  high = s->plane[plane].subband[2];
917  output = s->plane[plane].l_h[0];
918  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
919 
920  low = s->plane[plane].subband[1];
921  high = s->plane[plane].subband[3];
922  output = s->plane[plane].l_h[1];
923 
924  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
925 
926  low = s->plane[plane].l_h[0];
927  high = s->plane[plane].l_h[1];
928  output = s->plane[plane].subband[0];
929  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
930  if (s->bpc == 12) {
931  output = s->plane[plane].subband[0];
932  for (i = 0; i < lowpass_height * 2; i++) {
933  for (j = 0; j < lowpass_width * 2; j++)
934  output[j] *= 4;
935 
936  output += output_stride * 2;
937  }
938  }
939 
940  /* level 2 */
941  lowpass_height = s->plane[plane].band[1][1].height;
942  output_stride = s->plane[plane].band[1][1].a_width;
943  lowpass_width = s->plane[plane].band[1][1].width;
944  highpass_stride = s->plane[plane].band[1][1].stride;
945 
946  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
947  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
948  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
949  ret = AVERROR(EINVAL);
950  goto end;
951  }
952 
953  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
954 
955  low = s->plane[plane].subband[0];
956  high = s->plane[plane].subband[5];
957  output = s->plane[plane].l_h[3];
958  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
959 
960  low = s->plane[plane].subband[4];
961  high = s->plane[plane].subband[6];
962  output = s->plane[plane].l_h[4];
963  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
964 
965  low = s->plane[plane].l_h[3];
966  high = s->plane[plane].l_h[4];
967  output = s->plane[plane].subband[0];
968  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
969 
970  output = s->plane[plane].subband[0];
971  for (i = 0; i < lowpass_height * 2; i++) {
972  for (j = 0; j < lowpass_width * 2; j++)
973  output[j] *= 4;
974 
975  output += output_stride * 2;
976  }
977 
978  /* level 3 */
979  lowpass_height = s->plane[plane].band[2][1].height;
980  output_stride = s->plane[plane].band[2][1].a_width;
981  lowpass_width = s->plane[plane].band[2][1].width;
982  highpass_stride = s->plane[plane].band[2][1].stride;
983 
984  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
985  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
986  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
987  ret = AVERROR(EINVAL);
988  goto end;
989  }
990 
991  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
992  if (s->progressive) {
993  low = s->plane[plane].subband[0];
994  high = s->plane[plane].subband[8];
995  output = s->plane[plane].l_h[6];
996  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
997 
998  low = s->plane[plane].subband[7];
999  high = s->plane[plane].subband[9];
1000  output = s->plane[plane].l_h[7];
1001  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1002 
1003  dst = (int16_t *)pic->data[act_plane];
1004  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1005  if (plane & 1)
1006  dst++;
1007  if (plane > 1)
1008  dst += pic->linesize[act_plane] >> 1;
1009  }
1010  low = s->plane[plane].l_h[6];
1011  high = s->plane[plane].l_h[7];
1012 
1013  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1014  (lowpass_height * 2 > avctx->coded_height / 2 ||
1015  lowpass_width * 2 > avctx->coded_width / 2 )
1016  ) {
1017  ret = AVERROR_INVALIDDATA;
1018  goto end;
1019  }
1020 
1021  for (i = 0; i < lowpass_height * 2; i++) {
1022  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1023  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1024  process_alpha(dst, lowpass_width * 2);
1025  low += output_stride;
1026  high += output_stride;
1027  dst += dst_linesize;
1028  }
1029  } else {
1030  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
1031  pic->interlaced_frame = 1;
1032  low = s->plane[plane].subband[0];
1033  high = s->plane[plane].subband[7];
1034  output = s->plane[plane].l_h[6];
1035  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1036 
1037  low = s->plane[plane].subband[8];
1038  high = s->plane[plane].subband[9];
1039  output = s->plane[plane].l_h[7];
1040  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1041 
1042  dst = (int16_t *)pic->data[act_plane];
1043  low = s->plane[plane].l_h[6];
1044  high = s->plane[plane].l_h[7];
1045  for (i = 0; i < lowpass_height; i++) {
1046  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1047  low += output_stride * 2;
1048  high += output_stride * 2;
1049  dst += pic->linesize[act_plane];
1050  }
1051  }
1052  }
1053  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1054  for (plane = 0; plane < s->planes && !ret; plane++) {
1055  int lowpass_height = s->plane[plane].band[0][0].height;
1056  int output_stride = s->plane[plane].band[0][0].a_width;
1057  int lowpass_width = s->plane[plane].band[0][0].width;
1058  int highpass_stride = s->plane[plane].band[0][1].stride;
1059  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1060  int16_t *low, *high, *output, *dst;
1061  ptrdiff_t dst_linesize;
1062 
1063  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1064  act_plane = 0;
1065  dst_linesize = pic->linesize[act_plane];
1066  } else {
1067  dst_linesize = pic->linesize[act_plane] / 2;
1068  }
1069 
1070  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1071  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
1072  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1073  ret = AVERROR(EINVAL);
1074  goto end;
1075  }
1076 
1077  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1078 
1079  low = s->plane[plane].subband[0];
1080  high = s->plane[plane].subband[2];
1081  output = s->plane[plane].l_h[0];
1082  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1083 
1084  low = s->plane[plane].subband[1];
1085  high = s->plane[plane].subband[3];
1086  output = s->plane[plane].l_h[1];
1087  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1088 
1089  low = s->plane[plane].l_h[0];
1090  high = s->plane[plane].l_h[1];
1091  output = s->plane[plane].l_h[7];
1092  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1093  if (s->bpc == 12) {
1094  output = s->plane[plane].l_h[7];
1095  for (i = 0; i < lowpass_height * 2; i++) {
1096  for (j = 0; j < lowpass_width * 2; j++)
1097  output[j] *= 4;
1098 
1099  output += output_stride * 2;
1100  }
1101  }
1102 
1103  lowpass_height = s->plane[plane].band[1][1].height;
1104  output_stride = s->plane[plane].band[1][1].a_width;
1105  lowpass_width = s->plane[plane].band[1][1].width;
1106  highpass_stride = s->plane[plane].band[1][1].stride;
1107 
1108  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1109  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
1110  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1111  ret = AVERROR(EINVAL);
1112  goto end;
1113  }
1114 
1115  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1116 
1117  low = s->plane[plane].l_h[7];
1118  high = s->plane[plane].subband[5];
1119  output = s->plane[plane].l_h[3];
1120  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1121 
1122  low = s->plane[plane].subband[4];
1123  high = s->plane[plane].subband[6];
1124  output = s->plane[plane].l_h[4];
1125  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1126 
1127  low = s->plane[plane].l_h[3];
1128  high = s->plane[plane].l_h[4];
1129  output = s->plane[plane].l_h[7];
1130  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1131 
1132  output = s->plane[plane].l_h[7];
1133  for (i = 0; i < lowpass_height * 2; i++) {
1134  for (j = 0; j < lowpass_width * 2; j++)
1135  output[j] *= 4;
1136  output += output_stride * 2;
1137  }
1138 
1139  low = s->plane[plane].subband[7];
1140  high = s->plane[plane].subband[9];
1141  output = s->plane[plane].l_h[3];
1142  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1143 
1144  low = s->plane[plane].subband[8];
1145  high = s->plane[plane].subband[10];
1146  output = s->plane[plane].l_h[4];
1147  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1148 
1149  low = s->plane[plane].l_h[3];
1150  high = s->plane[plane].l_h[4];
1151  output = s->plane[plane].l_h[9];
1152  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1153 
1154  lowpass_height = s->plane[plane].band[4][1].height;
1155  output_stride = s->plane[plane].band[4][1].a_width;
1156  lowpass_width = s->plane[plane].band[4][1].width;
1157  highpass_stride = s->plane[plane].band[4][1].stride;
1158  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1159 
1160  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1161  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width) {
1162  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1163  ret = AVERROR(EINVAL);
1164  goto end;
1165  }
1166 
1167  low = s->plane[plane].l_h[7];
1168  high = s->plane[plane].l_h[9];
1169  output = s->plane[plane].l_h[7];
1170  for (i = 0; i < lowpass_height; i++) {
1171  inverse_temporal_filter(low, high, lowpass_width);
1172  low += output_stride;
1173  high += output_stride;
1174  }
1175  if (s->progressive) {
1176  low = s->plane[plane].l_h[7];
1177  high = s->plane[plane].subband[15];
1178  output = s->plane[plane].l_h[6];
1179  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1180 
1181  low = s->plane[plane].subband[14];
1182  high = s->plane[plane].subband[16];
1183  output = s->plane[plane].l_h[7];
1184  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1185 
1186  low = s->plane[plane].l_h[9];
1187  high = s->plane[plane].subband[12];
1188  output = s->plane[plane].l_h[8];
1189  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1190 
1191  low = s->plane[plane].subband[11];
1192  high = s->plane[plane].subband[13];
1193  output = s->plane[plane].l_h[9];
1194  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1195 
1196  if (s->sample_type == 1)
1197  continue;
1198 
1199  dst = (int16_t *)pic->data[act_plane];
1200  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1201  if (plane & 1)
1202  dst++;
1203  if (plane > 1)
1204  dst += pic->linesize[act_plane] >> 1;
1205  }
1206 
1207  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1208  (lowpass_height * 2 > avctx->coded_height / 2 ||
1209  lowpass_width * 2 > avctx->coded_width / 2 )
1210  ) {
1211  ret = AVERROR_INVALIDDATA;
1212  goto end;
1213  }
1214 
1215  low = s->plane[plane].l_h[6];
1216  high = s->plane[plane].l_h[7];
1217  for (i = 0; i < lowpass_height * 2; i++) {
1218  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1219  low += output_stride;
1220  high += output_stride;
1221  dst += dst_linesize;
1222  }
1223  } else {
1224  pic->interlaced_frame = 1;
1225  low = s->plane[plane].l_h[7];
1226  high = s->plane[plane].subband[14];
1227  output = s->plane[plane].l_h[6];
1228  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1229 
1230  low = s->plane[plane].subband[15];
1231  high = s->plane[plane].subband[16];
1232  output = s->plane[plane].l_h[7];
1233  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1234 
1235  low = s->plane[plane].l_h[9];
1236  high = s->plane[plane].subband[11];
1237  output = s->plane[plane].l_h[8];
1238  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1239 
1240  low = s->plane[plane].subband[12];
1241  high = s->plane[plane].subband[13];
1242  output = s->plane[plane].l_h[9];
1243  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1244 
1245  if (s->sample_type == 1)
1246  continue;
1247 
1248  dst = (int16_t *)pic->data[act_plane];
1249  low = s->plane[plane].l_h[6];
1250  high = s->plane[plane].l_h[7];
1251  for (i = 0; i < lowpass_height; i++) {
1252  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1253  low += output_stride * 2;
1254  high += output_stride * 2;
1255  dst += pic->linesize[act_plane];
1256  }
1257  }
1258  }
1259  }
1260 
1261  if (s->transform_type == 2 && s->sample_type == 1) {
1262  int16_t *low, *high, *dst;
1263  int output_stride, lowpass_height, lowpass_width;
1264  ptrdiff_t dst_linesize;
1265 
1266  for (plane = 0; plane < s->planes; plane++) {
1267  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1268 
1269  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1270  act_plane = 0;
1271  dst_linesize = pic->linesize[act_plane];
1272  } else {
1273  dst_linesize = pic->linesize[act_plane] / 2;
1274  }
1275 
1276  lowpass_height = s->plane[plane].band[4][1].height;
1277  output_stride = s->plane[plane].band[4][1].a_width;
1278  lowpass_width = s->plane[plane].band[4][1].width;
1279 
1280  if (s->progressive) {
1281  dst = (int16_t *)pic->data[act_plane];
1282  low = s->plane[plane].l_h[8];
1283  high = s->plane[plane].l_h[9];
1284 
1285  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1286  if (plane & 1)
1287  dst++;
1288  if (plane > 1)
1289  dst += pic->linesize[act_plane] >> 1;
1290  }
1291 
1292  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1293  (lowpass_height * 2 > avctx->coded_height / 2 ||
1294  lowpass_width * 2 > avctx->coded_width / 2 )
1295  ) {
1296  ret = AVERROR_INVALIDDATA;
1297  goto end;
1298  }
1299 
1300  for (i = 0; i < lowpass_height * 2; i++) {
1301  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1302  low += output_stride;
1303  high += output_stride;
1304  dst += dst_linesize;
1305  }
1306  } else {
1307  dst = (int16_t *)pic->data[act_plane];
1308  low = s->plane[plane].l_h[8];
1309  high = s->plane[plane].l_h[9];
1310  for (i = 0; i < lowpass_height; i++) {
1311  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1312  low += output_stride * 2;
1313  high += output_stride * 2;
1314  dst += pic->linesize[act_plane];
1315  }
1316  }
1317  }
1318  }
1319 
1320  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1321  process_bayer(pic, s->bpc);
1322 end:
1323  if (ret < 0)
1324  return ret;
1325 
1326  *got_frame = 1;
1327  return avpkt->size;
1328 }
1329 
1331 {
1332  CFHDContext *s = avctx->priv_data;
1333 
1334  free_buffers(s);
1335 
1336  ff_free_vlc(&s->vlc_9);
1337  ff_free_vlc(&s->vlc_18);
1338 
1339  return 0;
1340 }
1341 
#if HAVE_THREADS
/*
 * Frame-threading context update: carry the decoded transform state of the
 * previous thread's context over to the next one.  Needed because with
 * transform_type 2 a sample may reference wavelet data produced while
 * decoding the preceding sample.
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    const CFHDContext *src_ctx = src->priv_data;
    CFHDContext       *dst_ctx = dst->priv_data;

    /* Nothing to propagate for a self-update or before the first
     * transform header has been seen. */
    if (dst == src || src_ctx->transform_type == 0)
        return 0;

    dst_ctx->a_format       = src_ctx->a_format;
    dst_ctx->a_width        = src_ctx->a_width;
    dst_ctx->a_height       = src_ctx->a_height;
    dst_ctx->transform_type = src_ctx->transform_type;
    dst_ctx->progressive    = src_ctx->progressive;
    dst_ctx->planes         = src_ctx->planes;

    /* Lazily allocate the destination's wavelet buffers from the
     * remembered ("a_") dimensions the first time state arrives. */
    if (!dst_ctx->plane[0].idwt_buf) {
        int err;

        dst_ctx->coded_width  = dst_ctx->a_width;
        dst_ctx->coded_height = dst_ctx->a_height;
        dst_ctx->coded_format = dst_ctx->a_format;
        err = alloc_buffers(dst);
        if (err < 0)
            return err;
    }

    /* Clone per-plane band descriptors and the inverse-DWT scratch data. */
    for (int i = 0; i < dst_ctx->planes; i++) {
        memcpy(dst_ctx->plane[i].band, src_ctx->plane[i].band,
               sizeof(dst_ctx->plane[i].band));
        memcpy(dst_ctx->plane[i].idwt_buf, src_ctx->plane[i].idwt_buf,
               dst_ctx->plane[i].idwt_size * sizeof(int16_t));
    }

    return 0;
}
#endif
1377 
1379  .name = "cfhd",
1380  .long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
1381  .type = AVMEDIA_TYPE_VIDEO,
1382  .id = AV_CODEC_ID_CFHD,
1383  .priv_data_size = sizeof(CFHDContext),
1384  .init = cfhd_init,
1385  .close = cfhd_close,
1386  .decode = cfhd_decode,
1388  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1390 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
int channel_cnt
Definition: cfhd.h:170
#define NULL
Definition: coverity.c:32
int difference_coding
Definition: cfhd.h:178
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:364
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
VLC vlc_18
Definition: cfhd.h:148
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVCodecContext * avctx
Definition: cfhd.h:142
CFHDDSPContext dsp
Definition: cfhd.h:187
Definition: cfhd.h:86
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
TransformType
Definition: webp.c:110
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:127
float re
Definition: fft.c:82
misc image utilities
AVFrame * f
Definition: thread.h:35
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
Definition: cfhd.h:89
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:41
int a_height
Definition: cfhd.h:115
int level
Definition: cfhd.h:136
Definition: cfhd.h:72
int size
Definition: packet.h:364
int transform_type
Definition: cfhd.h:158
#define VLC_BITS
Definition: asvdec.c:37
int cropped_height
Definition: cfhd.h:161
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
int16_t * idwt_tmp
Definition: cfhd.h:125
int a_width
Definition: cfhd.h:165
ptrdiff_t stride
Definition: cfhd.h:112
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
uint8_t run
Definition: svq3.c:204
int subband_num_actual
Definition: cfhd.h:181
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:206
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
int sample_type
Definition: cfhd.h:157
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:193
Definition: cfhd.h:83
Macro definitions for various function/variable attributes.
int width
Definition: cfhd.h:114
int16_t * subband[SUBBAND_COUNT_3D]
Definition: cfhd.h:129
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
#define av_cold
Definition: attributes.h:88
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
AVOptions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
Multithreading support functions.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int16_t * idwt_buf
Definition: cfhd.h:124
int a_format
Definition: cfhd.h:167
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:236
#define height
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1532
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
#define FFALIGN(x, a)
Definition: macros.h:48
#define SUBBAND_COUNT
Definition: cfhd.h:99
#define av_log(a,...)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
int16_t * l_h[10]
Definition: cfhd.h:130
CFHD_RL_VLC_ELEM table_18_rl_vlc[4572]
Definition: cfhd.h:147
CFHD_RL_VLC_ELEM table_9_rl_vlc[2088]
Definition: cfhd.h:144
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
static const struct @323 planes[]
FrameType
G723.1 frame types.
Definition: g723_1.h:63
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define R
Definition: huffyuvdsp.h:34
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:132
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:126
VLC vlc_9
Definition: cfhd.h:145
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
#define DWT_LEVELS
Definition: cfhd.h:108
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
#define GET_RL_VLC(level, run, name, gb, table, bits,max_depth, need_update)
Definition: get_bits.h:738
Definition: cfhd.h:135
int a_width
Definition: cfhd.h:113
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1330
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:192
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:87
int16_t SampleType
Definition: ac3enc.h:70
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
#define b
Definition: input.c:41
GetByteContext base
Definition: cfhd.h:138
int subband_cnt
Definition: cfhd.h:171
#define width
#define FFSIGN(a)
Definition: common.h:73
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:73
uint8_t prescale_table[8]
Definition: cfhd.h:183
uint16_t quantisation
Definition: cfhd.h:175
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int frame_type
Definition: cfhd.h:155
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:43
#define FF_ARRAY_ELEMS(a)
int channel_num
Definition: cfhd.h:173
if(ret)
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:149
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
#define DWT_LEVELS_3D
Definition: cfhd.h:109
GetBitContext gb
Definition: cfhd.h:152
Libavcodec external API header.
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:526
int codebook
Definition: cfhd.h:177
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:106
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:115
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
int planes
Definition: cfhd.h:154
int idwt_size
Definition: cfhd.h:126
int subband_num
Definition: cfhd.h:179
int coded_height
Definition: avcodec.h:714
enum AVPixelFormat coded_format
Definition: cfhd.h:162
AVCodec ff_cfhd_decoder
Definition: cfhd.c:1378
refcounted data buffer API
static const int factor[16]
Definition: vf_pp7.c:75
Peak peak
Definition: cfhd.h:185
int band_encoding
Definition: cfhd.h:172
int level
Definition: cfhd.h:180
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:107
int frame_index
Definition: cfhd.h:156
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
uint8_t level
Definition: svq3.c:205
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:424
int coded_width
Definition: cfhd.h:159
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
common internal and external API header
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:135
ptrdiff_t stride
Definition: cfhd.h:122
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
int bpc
Definition: cfhd.h:169
void * priv_data
Definition: avcodec.h:553
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
int a_height
Definition: cfhd.h:166
#define SUBBAND_COUNT_3D
Definition: cfhd.h:100
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:80
int height
Definition: cfhd.h:116
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
int width
Definition: cfhd.h:120
int offset
Definition: cfhd.h:137
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:217
int height
Definition: frame.h:366
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:276
int progressive
Definition: cfhd.h:163
#define av_freep(p)
#define av_malloc_array(a, b)
int lut[2][256]
Definition: cfhd.h:150
#define stride
Plane plane[4]
Definition: cfhd.h:184
int height
Definition: cfhd.h:121
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t lowpass_precision
Definition: cfhd.h:174
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:358
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
int i
Definition: input.c:407
int coded_height
Definition: cfhd.h:160
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
Tag MUST be even
Definition: snow.txt:206