FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 static av_cold int cfhd_init(AVCodecContext *avctx)
44 {
45  CFHDContext *s = avctx->priv_data;
46 
47  s->avctx = avctx;
48 
49  for (int i = 0; i < 64; i++) {
50  int val = i;
51 
52  if (val >= 40) {
53  if (val >= 54) {
54  val -= 54;
55  val <<= 2;
56  val += 54;
57  }
58 
59  val -= 40;
60  val <<= 2;
61  val += 40;
62  }
63 
64  s->lut[0][i] = val;
65  }
66 
67  for (int i = 0; i < 256; i++)
68  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
69 
70  return ff_cfhd_init_vlcs(s);
71 }
72 
74 {
75  s->subband_num = 0;
76  s->level = 0;
77  s->subband_num_actual = 0;
78 }
79 
81 {
82  s->peak.level = 0;
83  s->peak.offset = 0;
84  memset(&s->peak.base, 0, sizeof(s->peak.base));
85 }
86 
88 {
89  s->coded_width = 0;
90  s->coded_height = 0;
92  s->cropped_height = 0;
93  s->bpc = 10;
94  s->channel_cnt = 3;
96  s->channel_num = 0;
97  s->lowpass_precision = 16;
98  s->quantisation = 1;
99  s->codebook = 0;
100  s->difference_coding = 0;
101  s->frame_type = 0;
102  s->sample_type = 0;
103  if (s->transform_type != 2)
104  s->transform_type = -1;
107 }
108 
109 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
110 {
111  if (codebook == 0 || codebook == 1) {
112  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
113  } else
114  return level * quantisation;
115 }
116 
static inline void difference_coding(int16_t *band, int width, int height)
{
    /* Per-row horizontal DPCM decode: samples are stored as differences
     * from the left neighbour, so integrate left-to-right in each row. */
    for (int row = 0; row < height; row++, band += width) {
        for (int col = 1; col < width; col++)
            band[col] += band[col - 1];
    }
}
128 
129 static inline void peak_table(int16_t *band, Peak *peak, int length)
130 {
131  int i;
132  for (i = 0; i < length; i++)
133  if (abs(band[i]) > peak->level)
134  band[i] = bytestream2_get_le16(&peak->base);
135 }
136 
137 static inline void process_alpha(int16_t *alpha, int width)
138 {
139  int i, channel;
140  for (i = 0; i < width; i++) {
141  channel = alpha[i];
142  channel -= ALPHA_COMPAND_DC_OFFSET;
143  channel <<= 3;
144  channel *= ALPHA_COMPAND_GAIN;
145  channel >>= 16;
146  channel = av_clip_uintp2(channel, 12);
147  alpha[i] = channel;
148  }
149 }
150 
151 static inline void process_bayer(AVFrame *frame, int bpc)
152 {
153  const int linesize = frame->linesize[0];
154  uint16_t *r = (uint16_t *)frame->data[0];
155  uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
156  uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
157  uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
158  const int mid = 1 << (bpc - 1);
159  const int factor = 1 << (16 - bpc);
160 
161  for (int y = 0; y < frame->height >> 1; y++) {
162  for (int x = 0; x < frame->width; x += 2) {
163  int R, G1, G2, B;
164  int g, rg, bg, gd;
165 
166  g = r[x];
167  rg = g1[x];
168  bg = g2[x];
169  gd = b[x];
170  gd -= mid;
171 
172  R = (rg - mid) * 2 + g;
173  G1 = g + gd;
174  G2 = g - gd;
175  B = (bg - mid) * 2 + g;
176 
177  R = av_clip_uintp2(R * factor, 16);
178  G1 = av_clip_uintp2(G1 * factor, 16);
179  G2 = av_clip_uintp2(G2 * factor, 16);
180  B = av_clip_uintp2(B * factor, 16);
181 
182  r[x] = R;
183  g1[x] = G1;
184  g2[x] = G2;
185  b[x] = B;
186  }
187 
188  r += linesize;
189  g1 += linesize;
190  g2 += linesize;
191  b += linesize;
192  }
193 }
194 
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    /* Reconstruct two interlaced output lines from one low/high band
     * pair: the even field is written to output[] and the odd field one
     * line (linesize samples) below, both clipped to 10 bits.
     * The 'plane' argument is unused here but kept for the caller's
     * uniform interface. */
    for (int i = 0; i < width; i++) {
        const int16_t even = (low[i] - high[i]) / 2;
        const int16_t odd  = (low[i] + high[i]) / 2;

        output[i]            = av_clip_uintp2(even, 10);
        output[i + linesize] = av_clip_uintp2(odd, 10);
    }
}
207 
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    /* In-place inverse temporal transform: the (low, high) band pair is
     * rewritten as the reconstructed even/odd temporal samples. */
    for (int i = 0; i < width; i++) {
        const int a = low[i];
        const int b = high[i];

        low[i]  = (a - b) / 2;
        high[i] = (a + b) / 2;
    }
}
218 
220 {
221  int i, j;
222 
223  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
224  av_freep(&s->plane[i].idwt_buf);
225  av_freep(&s->plane[i].idwt_tmp);
226  s->plane[i].idwt_size = 0;
227 
228  for (j = 0; j < SUBBAND_COUNT_3D; j++)
229  s->plane[i].subband[j] = NULL;
230 
231  for (j = 0; j < 10; j++)
232  s->plane[i].l_h[j] = NULL;
233  }
234  s->a_height = 0;
235  s->a_width = 0;
236 }
237 
238 static int alloc_buffers(AVCodecContext *avctx)
239 {
240  CFHDContext *s = avctx->priv_data;
241  int i, j, ret, planes, bayer = 0;
242  int chroma_x_shift, chroma_y_shift;
243  unsigned k;
244 
245  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
246  return ret;
247  avctx->pix_fmt = s->coded_format;
248 
250 
252  &chroma_x_shift,
253  &chroma_y_shift)) < 0)
254  return ret;
257  planes = 4;
258  chroma_x_shift = 1;
259  chroma_y_shift = 1;
260  bayer = 1;
261  }
262 
263  for (i = 0; i < planes; i++) {
264  int w8, h8, w4, h4, w2, h2;
265  int width = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
266  int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
267  ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;
268 
269  if (chroma_y_shift && !bayer)
270  height = FFALIGN(height / 8, 2) * 8;
271  s->plane[i].width = width;
272  s->plane[i].height = height;
273  s->plane[i].stride = stride;
274 
275  w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
276  h8 = FFALIGN(height, 8) / 8;
277  w4 = w8 * 2;
278  h4 = h8 * 2;
279  w2 = w4 * 2;
280  h2 = h4 * 2;
281 
282  if (s->transform_type == 0) {
283  s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
284  s->plane[i].idwt_buf =
285  av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
286  s->plane[i].idwt_tmp =
287  av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
288  } else {
289  s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
290  s->plane[i].idwt_buf =
291  av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
292  s->plane[i].idwt_tmp =
293  av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
294  }
295 
296  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
297  return AVERROR(ENOMEM);
298 
299  s->plane[i].subband[0] = s->plane[i].idwt_buf;
300  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
301  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
302  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
303  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
304  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
305  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
306  if (s->transform_type == 0) {
307  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
308  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
309  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
310  } else {
311  int16_t *frame2 =
312  s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
313  s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
314  s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
315  s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
316  s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
317  s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
318  s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
319  s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
320  s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
321  s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
322  }
323 
324  if (s->transform_type == 0) {
325  for (j = 0; j < DWT_LEVELS; j++) {
326  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
327  s->plane[i].band[j][k].a_width = w8 << j;
328  s->plane[i].band[j][k].a_height = h8 << j;
329  }
330  }
331  } else {
332  for (j = 0; j < DWT_LEVELS_3D; j++) {
333  int t = j < 1 ? 0 : (j < 3 ? 1 : 2);
334 
335  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
336  s->plane[i].band[j][k].a_width = w8 << t;
337  s->plane[i].band[j][k].a_height = h8 << t;
338  }
339  }
340  }
341 
342  /* ll2 and ll1 commented out because they are done in-place */
343  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
344  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
345  // s->plane[i].l_h[2] = ll2;
346  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
347  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
348  // s->plane[i].l_h[5] = ll1;
349  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
350  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
351  if (s->transform_type != 0) {
352  int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;
353 
354  s->plane[i].l_h[8] = frame2;
355  s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
356  }
357  }
358 
359  s->a_height = s->coded_height;
360  s->a_width = s->coded_width;
361  s->a_format = s->coded_format;
362 
363  return 0;
364 }
365 
366 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
367  AVPacket *avpkt)
368 {
369  CFHDContext *s = avctx->priv_data;
370  CFHDDSPContext *dsp = &s->dsp;
371  GetByteContext gb;
372  ThreadFrame frame = { .f = data };
373  AVFrame *pic = data;
374  int ret = 0, i, j, plane, got_buffer = 0;
375  int16_t *coeff_data;
376 
379 
380  bytestream2_init(&gb, avpkt->data, avpkt->size);
381 
382  while (bytestream2_get_bytes_left(&gb) >= 4) {
383  /* Bit weird but implement the tag parsing as the spec says */
384  uint16_t tagu = bytestream2_get_be16(&gb);
385  int16_t tag = (int16_t)tagu;
386  int8_t tag8 = (int8_t)(tagu >> 8);
387  uint16_t abstag = abs(tag);
388  int8_t abs_tag8 = abs(tag8);
389  uint16_t data = bytestream2_get_be16(&gb);
390  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
391  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
392  } else if (tag == SampleFlags) {
393  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
394  s->progressive = data & 0x0001;
395  } else if (tag == FrameType) {
396  s->frame_type = data;
397  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
398  } else if (abstag == VersionMajor) {
399  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
400  } else if (abstag == VersionMinor) {
401  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
402  } else if (abstag == VersionRevision) {
403  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
404  } else if (abstag == VersionEdit) {
405  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
406  } else if (abstag == Version) {
407  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
408  } else if (tag == ImageWidth) {
409  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
410  s->coded_width = data;
411  } else if (tag == ImageHeight) {
412  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
413  s->coded_height = data;
414  } else if (tag == ChannelCount) {
415  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
416  s->channel_cnt = data;
417  if (data > 4) {
418  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
419  ret = AVERROR_PATCHWELCOME;
420  goto end;
421  }
422  } else if (tag == SubbandCount) {
423  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
424  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
425  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
426  ret = AVERROR_PATCHWELCOME;
427  goto end;
428  }
429  } else if (tag == ChannelNumber) {
430  s->channel_num = data;
431  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
432  if (s->channel_num >= s->planes) {
433  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
434  ret = AVERROR(EINVAL);
435  goto end;
436  }
438  } else if (tag == SubbandNumber) {
439  if (s->subband_num != 0 && data == 1) // hack
440  s->level++;
441  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
442  s->subband_num = data;
443  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
444  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
445  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
446  ret = AVERROR(EINVAL);
447  goto end;
448  }
449  if (s->subband_num > 3) {
450  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
451  ret = AVERROR(EINVAL);
452  goto end;
453  }
454  } else if (tag == SubbandBand) {
455  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
456  if ((s->transform_type == 0 && data >= SUBBAND_COUNT) ||
457  (s->transform_type == 2 && data >= SUBBAND_COUNT_3D && data != 255)) {
458  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
459  ret = AVERROR(EINVAL);
460  goto end;
461  }
462  if (s->transform_type == 0 || s->transform_type == 2)
464  else
465  av_log(avctx, AV_LOG_WARNING, "Ignoring subband num actual %"PRIu16"\n", data);
466  } else if (tag == LowpassPrecision)
467  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
468  else if (tag == Quantization) {
469  s->quantisation = data;
470  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
471  } else if (tag == PrescaleTable) {
472  for (i = 0; i < 8; i++)
473  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
474  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
475  } else if (tag == BandEncoding) {
476  if (!data || data > 5) {
477  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
478  ret = AVERROR(EINVAL);
479  goto end;
480  }
481  s->band_encoding = data;
482  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
483  } else if (tag == LowpassWidth) {
484  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
485  s->plane[s->channel_num].band[0][0].width = data;
486  s->plane[s->channel_num].band[0][0].stride = data;
487  } else if (tag == LowpassHeight) {
488  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
489  s->plane[s->channel_num].band[0][0].height = data;
490  } else if (tag == SampleType) {
491  s->sample_type = data;
492  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
493  } else if (tag == TransformType) {
494  if (data > 2) {
495  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
496  ret = AVERROR(EINVAL);
497  goto end;
498  } else if (data == 1) {
499  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
500  ret = AVERROR_PATCHWELCOME;
501  goto end;
502  }
503  if (s->transform_type == -1) {
504  s->transform_type = data;
505  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
506  } else {
507  av_log(avctx, AV_LOG_DEBUG, "Ignoring additional transform type %"PRIu16"\n", data);
508  }
509  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
510  if (abstag == 0x4001)
511  s->peak.level = 0;
512  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
513  bytestream2_skipu(&gb, data * 4);
514  } else if (tag == FrameIndex) {
515  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
516  s->frame_index = data;
517  } else if (tag == SampleIndexTable) {
518  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
519  if (data > bytestream2_get_bytes_left(&gb) / 4) {
520  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
521  ret = AVERROR_INVALIDDATA;
522  goto end;
523  }
524  for (i = 0; i < data; i++) {
525  uint32_t offset = bytestream2_get_be32(&gb);
526  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
527  }
528  } else if (tag == HighpassWidth) {
529  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
530  if (data < 3) {
531  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
532  ret = AVERROR(EINVAL);
533  goto end;
534  }
535  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
536  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
537  } else if (tag == HighpassHeight) {
538  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
539  if (data < 3) {
540  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
541  ret = AVERROR(EINVAL);
542  goto end;
543  }
544  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
545  } else if (tag == BandWidth) {
546  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
547  if (data < 3) {
548  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
549  ret = AVERROR(EINVAL);
550  goto end;
551  }
552  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
553  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
554  } else if (tag == BandHeight) {
555  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
556  if (data < 3) {
557  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
558  ret = AVERROR(EINVAL);
559  goto end;
560  }
561  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
562  } else if (tag == InputFormat) {
563  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
564  if (s->coded_format == AV_PIX_FMT_NONE ||
566  if (data >= 100 && data <= 105) {
568  } else if (data >= 122 && data <= 128) {
570  } else if (data == 30) {
572  } else {
574  }
576  }
577  } else if (tag == BandCodingFlags) {
578  s->codebook = data & 0xf;
579  s->difference_coding = (data >> 4) & 1;
580  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
581  } else if (tag == Precision) {
582  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
583  if (!(data == 10 || data == 12)) {
584  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
585  ret = AVERROR(EINVAL);
586  goto end;
587  }
588  avctx->bits_per_raw_sample = s->bpc = data;
589  } else if (tag == EncodedFormat) {
590  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
591  if (data == 1) {
593  } else if (data == 2) {
595  } else if (data == 3) {
597  } else if (data == 4) {
599  } else {
600  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
601  ret = AVERROR_PATCHWELCOME;
602  goto end;
603  }
604  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
605  } else if (tag == -DisplayHeight) {
606  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
607  s->cropped_height = data;
608  } else if (tag == -PeakOffsetLow) {
609  s->peak.offset &= ~0xffff;
610  s->peak.offset |= (data & 0xffff);
611  s->peak.base = gb;
612  s->peak.level = 0;
613  } else if (tag == -PeakOffsetHigh) {
614  s->peak.offset &= 0xffff;
615  s->peak.offset |= (data & 0xffffU)<<16;
616  s->peak.base = gb;
617  s->peak.level = 0;
618  } else if (tag == -PeakLevel && s->peak.offset) {
619  s->peak.level = data;
620  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
621  } else
622  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
623 
624  if (tag == BitstreamMarker && data == 0xf0f &&
626  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
627  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
628  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
629 
630  if (s->coded_width) {
631  s->coded_width *= factor;
632  }
633 
634  if (s->coded_height) {
635  s->coded_height *= factor;
636  }
637 
638  if (!s->a_width && !s->coded_width) {
639  s->coded_width = lowpass_width * factor * 8;
640  }
641 
642  if (!s->a_height && !s->coded_height) {
643  s->coded_height = lowpass_height * factor * 8;
644  }
645 
646  if (s->a_width && !s->coded_width)
647  s->coded_width = s->a_width;
648  if (s->a_height && !s->coded_height)
649  s->coded_height = s->a_height;
650 
651  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
652  s->a_format != s->coded_format) {
653  free_buffers(s);
654  if ((ret = alloc_buffers(avctx)) < 0) {
655  free_buffers(s);
656  return ret;
657  }
658  }
659  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
660  if (ret < 0)
661  return ret;
662  if (s->cropped_height) {
663  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
664  if (avctx->height < height)
665  return AVERROR_INVALIDDATA;
666  avctx->height = height;
667  }
668  frame.f->width =
669  frame.f->height = 0;
670 
671  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
672  return ret;
673 
674  s->coded_width = 0;
675  s->coded_height = 0;
677  got_buffer = 1;
678  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
679  frame.f->width =
680  frame.f->height = 0;
681 
682  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
683  return ret;
684  s->coded_width = 0;
685  s->coded_height = 0;
687  got_buffer = 1;
688  }
689 
690  if (s->subband_num_actual == 255)
691  goto finish;
692  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
693 
694  /* Lowpass coefficients */
695  if (tag == BitstreamMarker && data == 0xf0f && s->a_width && s->a_height) {
696  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
697  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
698  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
699  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
700 
701  if (lowpass_width < 3 ||
702  lowpass_width > lowpass_a_width) {
703  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
704  ret = AVERROR(EINVAL);
705  goto end;
706  }
707 
708  if (lowpass_height < 3 ||
709  lowpass_height > lowpass_a_height) {
710  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
711  ret = AVERROR(EINVAL);
712  goto end;
713  }
714 
715  if (!got_buffer) {
716  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
717  ret = AVERROR(EINVAL);
718  goto end;
719  }
720 
721  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
722  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
723  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
724  ret = AVERROR(EINVAL);
725  goto end;
726  }
727 
728  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
729  for (i = 0; i < lowpass_height; i++) {
730  for (j = 0; j < lowpass_width; j++)
731  coeff_data[j] = bytestream2_get_be16u(&gb);
732 
733  coeff_data += lowpass_width;
734  }
735 
736  /* Align to mod-4 position to continue reading tags */
737  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
738 
739  /* Copy last line of coefficients if odd height */
740  if (lowpass_height & 1) {
741  memcpy(&coeff_data[lowpass_height * lowpass_width],
742  &coeff_data[(lowpass_height - 1) * lowpass_width],
743  lowpass_width * sizeof(*coeff_data));
744  }
745 
746  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
747  }
748 
749  if ((tag == BandHeader || tag == BandSecondPass) && s->subband_num_actual != 255 && s->a_width && s->a_height) {
750  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
751  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
752  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
753  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
754  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
755  int expected;
756  int a_expected = highpass_a_height * highpass_a_width;
757  int level, run, coeff;
758  int count = 0, bytes;
759 
760  if (!got_buffer) {
761  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
762  ret = AVERROR(EINVAL);
763  goto end;
764  }
765 
766  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
767  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
768  ret = AVERROR(EINVAL);
769  goto end;
770  }
771  expected = highpass_height * highpass_stride;
772 
773  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
774 
775  ret = init_get_bits8(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb));
776  if (ret < 0)
777  goto end;
778  {
779  OPEN_READER(re, &s->gb);
780 
781  const int lossless = s->band_encoding == 5;
782 
783  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
784  s->codebook = 1;
785  if (!s->codebook) {
786  while (1) {
787  UPDATE_CACHE(re, &s->gb);
788  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
789  VLC_BITS, 3, 1);
790 
791  /* escape */
792  if (level == 64)
793  break;
794 
795  count += run;
796 
797  if (count > expected)
798  break;
799 
800  if (!lossless)
801  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
802  else
803  coeff = level;
804  if (tag == BandSecondPass) {
805  const uint16_t q = s->quantisation;
806 
807  for (i = 0; i < run; i++) {
808  *coeff_data |= coeff << 8;
809  *coeff_data++ *= q;
810  }
811  } else {
812  for (i = 0; i < run; i++)
813  *coeff_data++ = coeff;
814  }
815  }
816  } else {
817  while (1) {
818  UPDATE_CACHE(re, &s->gb);
819  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
820  VLC_BITS, 3, 1);
821 
822  /* escape */
823  if (level == 255 && run == 2)
824  break;
825 
826  count += run;
827 
828  if (count > expected)
829  break;
830 
831  if (!lossless)
832  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
833  else
834  coeff = level;
835  if (tag == BandSecondPass) {
836  const uint16_t q = s->quantisation;
837 
838  for (i = 0; i < run; i++) {
839  *coeff_data |= coeff << 8;
840  *coeff_data++ *= q;
841  }
842  } else {
843  for (i = 0; i < run; i++)
844  *coeff_data++ = coeff;
845  }
846  }
847  }
848  CLOSE_READER(re, &s->gb);
849  }
850 
851  if (count > expected) {
852  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
853  ret = AVERROR(EINVAL);
854  goto end;
855  }
856  if (s->peak.level)
857  peak_table(coeff_data - count, &s->peak, count);
858  if (s->difference_coding)
859  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
860 
861  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
862  if (bytes > bytestream2_get_bytes_left(&gb)) {
863  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
864  ret = AVERROR(EINVAL);
865  goto end;
866  } else
867  bytestream2_seek(&gb, bytes, SEEK_CUR);
868 
869  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
870 finish:
871  if (s->subband_num_actual != 255)
872  s->codebook = 0;
873  }
874  }
875 
877  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
878  s->progressive = 1;
879  s->planes = 4;
880  }
881 
882  ff_thread_finish_setup(avctx);
883 
884  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
886  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
887  ret = AVERROR(EINVAL);
888  goto end;
889  }
890 
891  if (!got_buffer) {
892  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
893  ret = AVERROR(EINVAL);
894  goto end;
895  }
896 
897  if (s->transform_type == 0 && s->sample_type != 1) {
898  for (plane = 0; plane < s->planes && !ret; plane++) {
899  /* level 1 */
900  int lowpass_height = s->plane[plane].band[0][0].height;
901  int output_stride = s->plane[plane].band[0][0].a_width;
902  int lowpass_width = s->plane[plane].band[0][0].width;
903  int highpass_stride = s->plane[plane].band[0][1].stride;
904  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
905  ptrdiff_t dst_linesize;
906  int16_t *low, *high, *output, *dst;
907 
908  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
909  act_plane = 0;
910  dst_linesize = pic->linesize[act_plane];
911  } else {
912  dst_linesize = pic->linesize[act_plane] / 2;
913  }
914 
915  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
916  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
917  lowpass_width < 3 || lowpass_height < 3) {
918  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
919  ret = AVERROR(EINVAL);
920  goto end;
921  }
922 
923  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
924 
925  low = s->plane[plane].subband[0];
926  high = s->plane[plane].subband[2];
927  output = s->plane[plane].l_h[0];
928  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
929 
930  low = s->plane[plane].subband[1];
931  high = s->plane[plane].subband[3];
932  output = s->plane[plane].l_h[1];
933 
934  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
935 
936  low = s->plane[plane].l_h[0];
937  high = s->plane[plane].l_h[1];
938  output = s->plane[plane].subband[0];
939  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
940  if (s->bpc == 12) {
941  output = s->plane[plane].subband[0];
942  for (i = 0; i < lowpass_height * 2; i++) {
943  for (j = 0; j < lowpass_width * 2; j++)
944  output[j] *= 4;
945 
946  output += output_stride * 2;
947  }
948  }
949 
950  /* level 2 */
951  lowpass_height = s->plane[plane].band[1][1].height;
952  output_stride = s->plane[plane].band[1][1].a_width;
953  lowpass_width = s->plane[plane].band[1][1].width;
954  highpass_stride = s->plane[plane].band[1][1].stride;
955 
956  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
957  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
958  lowpass_width < 3 || lowpass_height < 3) {
959  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
960  ret = AVERROR(EINVAL);
961  goto end;
962  }
963 
964  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
965 
966  low = s->plane[plane].subband[0];
967  high = s->plane[plane].subband[5];
968  output = s->plane[plane].l_h[3];
969  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
970 
971  low = s->plane[plane].subband[4];
972  high = s->plane[plane].subband[6];
973  output = s->plane[plane].l_h[4];
974  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
975 
976  low = s->plane[plane].l_h[3];
977  high = s->plane[plane].l_h[4];
978  output = s->plane[plane].subband[0];
979  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
980 
981  output = s->plane[plane].subband[0];
982  for (i = 0; i < lowpass_height * 2; i++) {
983  for (j = 0; j < lowpass_width * 2; j++)
984  output[j] *= 4;
985 
986  output += output_stride * 2;
987  }
988 
989  /* level 3 */
990  lowpass_height = s->plane[plane].band[2][1].height;
991  output_stride = s->plane[plane].band[2][1].a_width;
992  lowpass_width = s->plane[plane].band[2][1].width;
993  highpass_stride = s->plane[plane].band[2][1].stride;
994 
995  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
996  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width ||
997  lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->plane[plane].width) {
998  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
999  ret = AVERROR(EINVAL);
1000  goto end;
1001  }
1002 
1003  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1004  if (s->progressive) {
1005  low = s->plane[plane].subband[0];
1006  high = s->plane[plane].subband[8];
1007  output = s->plane[plane].l_h[6];
1008  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1009 
1010  low = s->plane[plane].subband[7];
1011  high = s->plane[plane].subband[9];
1012  output = s->plane[plane].l_h[7];
1013  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1014 
1015  dst = (int16_t *)pic->data[act_plane];
1016  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1017  if (plane & 1)
1018  dst++;
1019  if (plane > 1)
1020  dst += pic->linesize[act_plane] >> 1;
1021  }
1022  low = s->plane[plane].l_h[6];
1023  high = s->plane[plane].l_h[7];
1024 
1025  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1026  (lowpass_height * 2 > avctx->coded_height / 2 ||
1027  lowpass_width * 2 > avctx->coded_width / 2 )
1028  ) {
1029  ret = AVERROR_INVALIDDATA;
1030  goto end;
1031  }
1032 
1033  for (i = 0; i < s->plane[act_plane].height; i++) {
1034  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1035  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1036  process_alpha(dst, lowpass_width * 2);
1037  low += output_stride;
1038  high += output_stride;
1039  dst += dst_linesize;
1040  }
1041  } else {
1042  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
1043  pic->interlaced_frame = 1;
1044  low = s->plane[plane].subband[0];
1045  high = s->plane[plane].subband[7];
1046  output = s->plane[plane].l_h[6];
1047  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1048 
1049  low = s->plane[plane].subband[8];
1050  high = s->plane[plane].subband[9];
1051  output = s->plane[plane].l_h[7];
1052  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1053 
1054  dst = (int16_t *)pic->data[act_plane];
1055  low = s->plane[plane].l_h[6];
1056  high = s->plane[plane].l_h[7];
1057  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1058  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1059  low += output_stride * 2;
1060  high += output_stride * 2;
1061  dst += pic->linesize[act_plane];
1062  }
1063  }
1064  }
1065  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1066  for (plane = 0; plane < s->planes && !ret; plane++) {
1067  int lowpass_height = s->plane[plane].band[0][0].height;
1068  int output_stride = s->plane[plane].band[0][0].a_width;
1069  int lowpass_width = s->plane[plane].band[0][0].width;
1070  int highpass_stride = s->plane[plane].band[0][1].stride;
1071  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1072  int16_t *low, *high, *output, *dst;
1073  ptrdiff_t dst_linesize;
1074 
1075  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1076  act_plane = 0;
1077  dst_linesize = pic->linesize[act_plane];
1078  } else {
1079  dst_linesize = pic->linesize[act_plane] / 2;
1080  }
1081 
1082  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1083  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
1084  lowpass_width < 3 || lowpass_height < 3) {
1085  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1086  ret = AVERROR(EINVAL);
1087  goto end;
1088  }
1089 
1090  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1091 
1092  low = s->plane[plane].subband[0];
1093  high = s->plane[plane].subband[2];
1094  output = s->plane[plane].l_h[0];
1095  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1096 
1097  low = s->plane[plane].subband[1];
1098  high = s->plane[plane].subband[3];
1099  output = s->plane[plane].l_h[1];
1100  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1101 
1102  low = s->plane[plane].l_h[0];
1103  high = s->plane[plane].l_h[1];
1104  output = s->plane[plane].l_h[7];
1105  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1106  if (s->bpc == 12) {
1107  output = s->plane[plane].l_h[7];
1108  for (i = 0; i < lowpass_height * 2; i++) {
1109  for (j = 0; j < lowpass_width * 2; j++)
1110  output[j] *= 4;
1111 
1112  output += output_stride * 2;
1113  }
1114  }
1115 
1116  lowpass_height = s->plane[plane].band[1][1].height;
1117  output_stride = s->plane[plane].band[1][1].a_width;
1118  lowpass_width = s->plane[plane].band[1][1].width;
1119  highpass_stride = s->plane[plane].band[1][1].stride;
1120 
1121  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1122  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1123  lowpass_width < 3 || lowpass_height < 3) {
1124  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1125  ret = AVERROR(EINVAL);
1126  goto end;
1127  }
1128 
1129  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1130 
1131  low = s->plane[plane].l_h[7];
1132  high = s->plane[plane].subband[5];
1133  output = s->plane[plane].l_h[3];
1134  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1135 
1136  low = s->plane[plane].subband[4];
1137  high = s->plane[plane].subband[6];
1138  output = s->plane[plane].l_h[4];
1139  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1140 
1141  low = s->plane[plane].l_h[3];
1142  high = s->plane[plane].l_h[4];
1143  output = s->plane[plane].l_h[7];
1144  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1145 
1146  output = s->plane[plane].l_h[7];
1147  for (i = 0; i < lowpass_height * 2; i++) {
1148  for (j = 0; j < lowpass_width * 2; j++)
1149  output[j] *= 4;
1150  output += output_stride * 2;
1151  }
1152 
1153  low = s->plane[plane].subband[7];
1154  high = s->plane[plane].subband[9];
1155  output = s->plane[plane].l_h[3];
1156  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1157 
1158  low = s->plane[plane].subband[8];
1159  high = s->plane[plane].subband[10];
1160  output = s->plane[plane].l_h[4];
1161  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1162 
1163  low = s->plane[plane].l_h[3];
1164  high = s->plane[plane].l_h[4];
1165  output = s->plane[plane].l_h[9];
1166  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1167 
1168  lowpass_height = s->plane[plane].band[4][1].height;
1169  output_stride = s->plane[plane].band[4][1].a_width;
1170  lowpass_width = s->plane[plane].band[4][1].width;
1171  highpass_stride = s->plane[plane].band[4][1].stride;
1172  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1173 
1174  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1175  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1176  lowpass_width < 3 || lowpass_height < 3) {
1177  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1178  ret = AVERROR(EINVAL);
1179  goto end;
1180  }
1181 
1182  low = s->plane[plane].l_h[7];
1183  high = s->plane[plane].l_h[9];
1184  output = s->plane[plane].l_h[7];
1185  for (i = 0; i < lowpass_height; i++) {
1186  inverse_temporal_filter(low, high, lowpass_width);
1187  low += output_stride;
1188  high += output_stride;
1189  }
1190  if (s->progressive) {
1191  low = s->plane[plane].l_h[7];
1192  high = s->plane[plane].subband[15];
1193  output = s->plane[plane].l_h[6];
1194  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1195 
1196  low = s->plane[plane].subband[14];
1197  high = s->plane[plane].subband[16];
1198  output = s->plane[plane].l_h[7];
1199  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1200 
1201  low = s->plane[plane].l_h[9];
1202  high = s->plane[plane].subband[12];
1203  output = s->plane[plane].l_h[8];
1204  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1205 
1206  low = s->plane[plane].subband[11];
1207  high = s->plane[plane].subband[13];
1208  output = s->plane[plane].l_h[9];
1209  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1210 
1211  if (s->sample_type == 1)
1212  continue;
1213 
1214  dst = (int16_t *)pic->data[act_plane];
1215  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1216  if (plane & 1)
1217  dst++;
1218  if (plane > 1)
1219  dst += pic->linesize[act_plane] >> 1;
1220  }
1221 
1222  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1223  (lowpass_height * 2 > avctx->coded_height / 2 ||
1224  lowpass_width * 2 > avctx->coded_width / 2 )
1225  ) {
1226  ret = AVERROR_INVALIDDATA;
1227  goto end;
1228  }
1229 
1230  low = s->plane[plane].l_h[6];
1231  high = s->plane[plane].l_h[7];
1232  for (i = 0; i < s->plane[act_plane].height; i++) {
1233  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1234  low += output_stride;
1235  high += output_stride;
1236  dst += dst_linesize;
1237  }
1238  } else {
1239  pic->interlaced_frame = 1;
1240  low = s->plane[plane].l_h[7];
1241  high = s->plane[plane].subband[14];
1242  output = s->plane[plane].l_h[6];
1243  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1244 
1245  low = s->plane[plane].subband[15];
1246  high = s->plane[plane].subband[16];
1247  output = s->plane[plane].l_h[7];
1248  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1249 
1250  low = s->plane[plane].l_h[9];
1251  high = s->plane[plane].subband[11];
1252  output = s->plane[plane].l_h[8];
1253  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1254 
1255  low = s->plane[plane].subband[12];
1256  high = s->plane[plane].subband[13];
1257  output = s->plane[plane].l_h[9];
1258  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1259 
1260  if (s->sample_type == 1)
1261  continue;
1262 
1263  dst = (int16_t *)pic->data[act_plane];
1264  low = s->plane[plane].l_h[6];
1265  high = s->plane[plane].l_h[7];
1266  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1267  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1268  low += output_stride * 2;
1269  high += output_stride * 2;
1270  dst += pic->linesize[act_plane];
1271  }
1272  }
1273  }
1274  }
1275 
1276  if (s->transform_type == 2 && s->sample_type == 1) {
1277  int16_t *low, *high, *dst;
1278  int output_stride, lowpass_height, lowpass_width;
1279  ptrdiff_t dst_linesize;
1280 
1281  for (plane = 0; plane < s->planes; plane++) {
1282  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1283 
1284  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1285  act_plane = 0;
1286  dst_linesize = pic->linesize[act_plane];
1287  } else {
1288  dst_linesize = pic->linesize[act_plane] / 2;
1289  }
1290 
1291  lowpass_height = s->plane[plane].band[4][1].height;
1292  output_stride = s->plane[plane].band[4][1].a_width;
1293  lowpass_width = s->plane[plane].band[4][1].width;
1294 
1295  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1296  s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1297  lowpass_width < 3 || lowpass_height < 3) {
1298  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1299  ret = AVERROR(EINVAL);
1300  goto end;
1301  }
1302 
1303  if (s->progressive) {
1304  dst = (int16_t *)pic->data[act_plane];
1305  low = s->plane[plane].l_h[8];
1306  high = s->plane[plane].l_h[9];
1307 
1308  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1309  if (plane & 1)
1310  dst++;
1311  if (plane > 1)
1312  dst += pic->linesize[act_plane] >> 1;
1313  }
1314 
1315  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1316  (lowpass_height * 2 > avctx->coded_height / 2 ||
1317  lowpass_width * 2 > avctx->coded_width / 2 )
1318  ) {
1319  ret = AVERROR_INVALIDDATA;
1320  goto end;
1321  }
1322 
1323  for (i = 0; i < s->plane[act_plane].height; i++) {
1324  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1325  low += output_stride;
1326  high += output_stride;
1327  dst += dst_linesize;
1328  }
1329  } else {
1330  dst = (int16_t *)pic->data[act_plane];
1331  low = s->plane[plane].l_h[8];
1332  high = s->plane[plane].l_h[9];
1333  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1334  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1335  low += output_stride * 2;
1336  high += output_stride * 2;
1337  dst += pic->linesize[act_plane];
1338  }
1339  }
1340  }
1341  }
1342 
1343  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1344  process_bayer(pic, s->bpc);
1345 end:
1346  if (ret < 0)
1347  return ret;
1348 
1349  *got_frame = 1;
1350  return avpkt->size;
1351 }
1352 
1354 {
1355  CFHDContext *s = avctx->priv_data;
1356 
1357  free_buffers(s);
1358 
1359  ff_free_vlc(&s->vlc_9);
1360  ff_free_vlc(&s->vlc_18);
1361 
1362  return 0;
1363 }
1364 
#if HAVE_THREADS
/**
 * Frame-threading callback: propagate decoder state from the source
 * thread's context to the destination thread's context.
 *
 * Only streams with transform_type != 0 carry state that a later frame
 * needs (the band metadata and the inverse-DWT sample buffers), so all
 * other cases return immediately.
 *
 * @return 0 on success, a negative AVERROR on allocation failure
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    CFHDContext *csrc = src->priv_data;
    CFHDContext *cdst = dst->priv_data;
    int ret;

    /* Shared context, or a 2D stream with no cross-frame state: no-op. */
    if (dst == src || csrc->transform_type == 0)
        return 0;

    cdst->a_format       = csrc->a_format;
    cdst->a_width        = csrc->a_width;
    cdst->a_height       = csrc->a_height;
    cdst->transform_type = csrc->transform_type;
    cdst->progressive    = csrc->progressive;
    cdst->planes         = csrc->planes;

    /* First use on this thread: the destination has no IDWT buffers yet,
     * so allocate them from the mirrored coded dimensions/format. */
    if (!cdst->plane[0].idwt_buf) {
        cdst->coded_width  = cdst->a_width;
        cdst->coded_height = cdst->a_height;
        cdst->coded_format = cdst->a_format;
        if ((ret = alloc_buffers(dst)) < 0)
            return ret;
    }

    /* Mirror per-plane band descriptors and decoded subband samples. */
    for (int i = 0; i < cdst->planes; i++) {
        memcpy(cdst->plane[i].band, csrc->plane[i].band,
               sizeof(cdst->plane[i].band));
        memcpy(cdst->plane[i].idwt_buf, csrc->plane[i].idwt_buf,
               cdst->plane[i].idwt_size * sizeof(int16_t));
    }

    return 0;
}
#endif
1400 
1402  .name = "cfhd",
1403  .long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
1404  .type = AVMEDIA_TYPE_VIDEO,
1405  .id = AV_CODEC_ID_CFHD,
1406  .priv_data_size = sizeof(CFHDContext),
1407  .init = cfhd_init,
1408  .close = cfhd_close,
1409  .decode = cfhd_decode,
1411  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1413 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
int channel_cnt
Definition: cfhd.h:170
#define NULL
Definition: coverity.c:32
int difference_coding
Definition: cfhd.h:178
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:366
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
VLC vlc_18
Definition: cfhd.h:148
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
AVCodecContext * avctx
Definition: cfhd.h:142
CFHDDSPContext dsp
Definition: cfhd.h:187
Definition: cfhd.h:86
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
TransformType
Definition: webp.c:110
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:719
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:129
float re
Definition: fft.c:82
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AVFrame * f
Definition: thread.h:35
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
Definition: cfhd.h:89
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:41
int a_height
Definition: cfhd.h:115
int level
Definition: cfhd.h:136
Definition: cfhd.h:72
int size
Definition: packet.h:364
int transform_type
Definition: cfhd.h:158
int cropped_height
Definition: cfhd.h:161
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
int16_t * idwt_tmp
Definition: cfhd.h:125
int a_width
Definition: cfhd.h:165
ptrdiff_t stride
Definition: cfhd.h:112
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
uint8_t run
Definition: svq3.c:204
int subband_num_actual
Definition: cfhd.h:181
#define VLC_BITS
Definition: cfhd.h:98
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:208
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1762
int sample_type
Definition: cfhd.h:157
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:195
Definition: cfhd.h:83
Macro definitions for various function/variable attributes.
int width
Definition: cfhd.h:114
int16_t * subband[SUBBAND_COUNT_3D]
Definition: cfhd.h:129
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
#define av_cold
Definition: attributes.h:88
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
AVOptions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
Multithreading support functions.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int16_t * idwt_buf
Definition: cfhd.h:124
int a_format
Definition: cfhd.h:167
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:238
#define height
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1597
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:461
#define FFALIGN(x, a)
Definition: macros.h:48
#define SUBBAND_COUNT
Definition: cfhd.h:99
#define av_log(a,...)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
int16_t * l_h[10]
Definition: cfhd.h:130
CFHD_RL_VLC_ELEM table_18_rl_vlc[4572]
Definition: cfhd.h:147
CFHD_RL_VLC_ELEM table_9_rl_vlc[2088]
Definition: cfhd.h:144
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
FrameType
G723.1 frame types.
Definition: g723_1.h:63
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define R
Definition: huffyuvdsp.h:34
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:132
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:126
VLC vlc_9
Definition: cfhd.h:145
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
#define DWT_LEVELS
Definition: cfhd.h:108
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
#define GET_RL_VLC(level, run, name, gb, table, bits,max_depth, need_update)
Definition: get_bits.h:738
Definition: cfhd.h:135
int a_width
Definition: cfhd.h:113
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1353
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:192
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:87
int16_t SampleType
Definition: ac3enc.h:70
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
#define b
Definition: input.c:41
GetByteContext base
Definition: cfhd.h:138
int subband_cnt
Definition: cfhd.h:171
#define width
#define FFSIGN(a)
Definition: common.h:73
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:73
uint8_t prescale_table[8]
Definition: cfhd.h:183
uint16_t quantisation
Definition: cfhd.h:175
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int frame_type
Definition: cfhd.h:155
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:43
#define FF_ARRAY_ELEMS(a)
int channel_num
Definition: cfhd.h:173
if(ret)
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:151
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
#define DWT_LEVELS_3D
Definition: cfhd.h:109
GetBitContext gb
Definition: cfhd.h:152
Libavcodec external API header.
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:531
int codebook
Definition: cfhd.h:177
static const struct @324 planes[]
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:106
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:117
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
int planes
Definition: cfhd.h:154
int idwt_size
Definition: cfhd.h:126
int subband_num
Definition: cfhd.h:179
int coded_height
Definition: avcodec.h:719
enum AVPixelFormat coded_format
Definition: cfhd.h:162
AVCodec ff_cfhd_decoder
Definition: cfhd.c:1401
refcounted data buffer API
static const int factor[16]
Definition: vf_pp7.c:75
Peak peak
Definition: cfhd.h:185
int band_encoding
Definition: cfhd.h:172
int level
Definition: cfhd.h:180
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:109
int frame_index
Definition: cfhd.h:156
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
uint8_t level
Definition: svq3.c:205
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:424
int coded_width
Definition: cfhd.h:159
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
common internal and external API header
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:137
ptrdiff_t stride
Definition: cfhd.h:122
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
int bpc
Definition: cfhd.h:169
void * priv_data
Definition: avcodec.h:558
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
int a_height
Definition: cfhd.h:166
#define SUBBAND_COUNT_3D
Definition: cfhd.h:100
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:80
int height
Definition: cfhd.h:116
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
int width
Definition: cfhd.h:120
int offset
Definition: cfhd.h:137
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:219
int height
Definition: frame.h:372
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:276
int progressive
Definition: cfhd.h:163
#define av_freep(p)
#define av_malloc_array(a, b)
int lut[2][256]
Definition: cfhd.h:150
#define stride
Plane plane[4]
Definition: cfhd.h:184
int height
Definition: cfhd.h:121
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t lowpass_precision
Definition: cfhd.h:174
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:365
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
int i
Definition: input.c:407
int coded_height
Definition: cfhd.h:160
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
Tag MUST be even
Definition: snow.txt:206