FFmpeg
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/buffer.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/opt.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "cfhd.h"
39 
40 #define ALPHA_COMPAND_DC_OFFSET 256
41 #define ALPHA_COMPAND_GAIN 9400
42 
43 static av_cold int cfhd_init(AVCodecContext *avctx)
44 {
45  CFHDContext *s = avctx->priv_data;
46 
47  s->avctx = avctx;
48 
49  for (int i = 0; i < 64; i++) {
50  int val = i;
51 
52  if (val >= 40) {
53  if (val >= 54) {
54  val -= 54;
55  val <<= 2;
56  val += 54;
57  }
58 
59  val -= 40;
60  val <<= 2;
61  val += 40;
62  }
63 
64  s->lut[0][i] = val;
65  }
66 
67  for (int i = 0; i < 256; i++)
68  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
69 
70  return ff_cfhd_init_vlcs(s);
71 }
72 
74 {
75  s->subband_num = 0;
76  s->level = 0;
77  s->subband_num_actual = 0;
78 }
79 
81 {
82  s->peak.level = 0;
83  s->peak.offset = 0;
84  memset(&s->peak.base, 0, sizeof(s->peak.base));
85 }
86 
88 {
89  s->coded_width = 0;
90  s->coded_height = 0;
92  s->cropped_height = 0;
93  s->bpc = 10;
94  s->channel_cnt = 3;
96  s->channel_num = 0;
97  s->lowpass_precision = 16;
98  s->quantisation = 1;
99  s->codebook = 0;
100  s->difference_coding = 0;
101  s->frame_type = 0;
102  s->sample_type = 0;
103  if (s->transform_type != 2)
104  s->transform_type = -1;
107 }
108 
109 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
110 {
111  if (codebook == 0 || codebook == 1) {
112  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
113  } else
114  return level * quantisation;
115 }
116 
/**
 * In-place horizontal delta decoding: each row of the band stores its
 * first sample followed by successive differences; convert back to
 * absolute values with a running prefix sum per row.
 */
static inline void difference_coding(int16_t *band, int width, int height)
{
    int row, col;

    for (row = 0; row < height; row++) {
        int16_t *line = band + row * width;

        for (col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
128 
129 static inline void peak_table(int16_t *band, Peak *peak, int length)
130 {
131  int i;
132  for (i = 0; i < length; i++)
133  if (abs(band[i]) > peak->level)
134  band[i] = bytestream2_get_le16(&peak->base);
135 }
136 
137 static inline void process_alpha(int16_t *alpha, int width)
138 {
139  int i, channel;
140  for (i = 0; i < width; i++) {
141  channel = alpha[i];
142  channel -= ALPHA_COMPAND_DC_OFFSET;
143  channel <<= 3;
144  channel *= ALPHA_COMPAND_GAIN;
145  channel >>= 16;
146  channel = av_clip_uintp2(channel, 12);
147  alpha[i] = channel;
148  }
149 }
150 
/**
 * Reconstruct RGGB Bayer samples in place from the decoded
 * G / RG-diff / BG-diff / G-delta representation.
 *
 * Each 2x2 quad holds, at its Bayer positions, the average green (at the
 * R site), the mid-biased red and blue difference terms (at the G1 and G2
 * sites) and the mid-biased green delta (at the B site).  This undoes
 * that transform and rescales samples from bpc bits up to 16 bits.
 *
 * @param frame frame whose data[0] holds the packed 16-bit Bayer plane
 * @param bpc   coded bits per component (mid = half scale, factor = 2^(16-bpc))
 */
static inline void process_bayer(AVFrame *frame, int bpc)
{
    const int linesize = frame->linesize[0];
    /* pointers to the four quad positions: r/g1 on the even row,
     * g2/b on the odd row (the +2 offsets are in bytes, i.e. one sample) */
    uint16_t *r = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 1 << (bpc - 1);     /* bias carried by the difference terms */
    const int factor = 1 << (16 - bpc); /* scale from bpc bits to full 16-bit range */

    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            /* fetch the four decoded components of this quad */
            g = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            /* invert the difference transform */
            R = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B = (bg - mid) * 2 + g;

            /* scale up to 16 bits and clamp */
            R = av_clip_uintp2(R * factor, 16);
            G1 = av_clip_uintp2(G1 * factor, 16);
            G2 = av_clip_uintp2(G2 * factor, 16);
            B = av_clip_uintp2(B * factor, 16);

            r[x] = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x] = B;
        }

        /* linesize is in bytes, so advancing a uint16_t pointer by
         * `linesize` elements skips two rows — one quad row per iteration */
        r += linesize;
        g1 += linesize;
        g2 += linesize;
        b += linesize;
    }
}
194 
/**
 * Combine the low/high bands of one interlaced line pair into the even
 * and odd field lines of the output, each clamped to 10 bits.
 * `linesize` is in int16_t units; `plane` is unused here.
 */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    int x;

    for (x = 0; x < width; x++) {
        /* keep 16-bit intermediates: sums truncate before the clip */
        int16_t even_line = (low[x] - high[x]) / 2;
        int16_t odd_line  = (low[x] + high[x]) / 2;

        output[x]            = av_clip_uintp2(even_line, 10);
        output[x + linesize] = av_clip_uintp2(odd_line, 10);
    }
}
207 
/**
 * In-place inverse temporal transform: rebuild the two original samples
 * (written back over the low and high bands respectively) from the
 * temporal low-pass (average) and high-pass (difference) bands.
 */
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    int x;

    for (x = 0; x < width; x++) {
        int first  = (low[x] - high[x]) / 2;
        int second = (low[x] + high[x]) / 2;

        low[x]  = first;
        high[x] = second;
    }
}
218 
220 {
221  int i, j;
222 
223  for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
224  av_freep(&s->plane[i].idwt_buf);
225  av_freep(&s->plane[i].idwt_tmp);
226  s->plane[i].idwt_size = 0;
227 
228  for (j = 0; j < SUBBAND_COUNT_3D; j++)
229  s->plane[i].subband[j] = NULL;
230 
231  for (j = 0; j < 10; j++)
232  s->plane[i].l_h[j] = NULL;
233  }
234  s->a_height = 0;
235  s->a_width = 0;
236 }
237 
238 static int alloc_buffers(AVCodecContext *avctx)
239 {
240  CFHDContext *s = avctx->priv_data;
241  int i, j, ret, planes, bayer = 0;
242  int chroma_x_shift, chroma_y_shift;
243  unsigned k;
244 
245  if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
246  return ret;
247  avctx->pix_fmt = s->coded_format;
248 
250 
252  &chroma_x_shift,
253  &chroma_y_shift)) < 0)
254  return ret;
257  planes = 4;
258  chroma_x_shift = 1;
259  chroma_y_shift = 1;
260  bayer = 1;
261  }
262 
263  for (i = 0; i < planes; i++) {
264  int w8, h8, w4, h4, w2, h2;
265  int width = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
266  int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
267  ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;
268 
269  if (chroma_y_shift && !bayer)
270  height = FFALIGN(height / 8, 2) * 8;
271  s->plane[i].width = width;
272  s->plane[i].height = height;
273  s->plane[i].stride = stride;
274 
275  w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
276  h8 = FFALIGN(height, 8) / 8;
277  w4 = w8 * 2;
278  h4 = h8 * 2;
279  w2 = w4 * 2;
280  h2 = h4 * 2;
281 
282  if (s->transform_type == 0) {
283  s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
284  s->plane[i].idwt_buf =
285  av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
286  s->plane[i].idwt_tmp =
287  av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
288  } else {
289  s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
290  s->plane[i].idwt_buf =
291  av_mallocz_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
292  s->plane[i].idwt_tmp =
293  av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
294  }
295 
296  if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
297  return AVERROR(ENOMEM);
298 
299  s->plane[i].subband[0] = s->plane[i].idwt_buf;
300  s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
301  s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
302  s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
303  s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
304  s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
305  s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
306  if (s->transform_type == 0) {
307  s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
308  s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
309  s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
310  } else {
311  int16_t *frame2 =
312  s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
313  s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
314  s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
315  s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
316  s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
317  s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
318  s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
319  s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
320  s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
321  s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
322  }
323 
324  if (s->transform_type == 0) {
325  for (j = 0; j < DWT_LEVELS; j++) {
326  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
327  s->plane[i].band[j][k].a_width = w8 << j;
328  s->plane[i].band[j][k].a_height = h8 << j;
329  }
330  }
331  } else {
332  for (j = 0; j < DWT_LEVELS_3D; j++) {
333  int t = j < 1 ? 0 : (j < 3 ? 1 : 2);
334 
335  for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
336  s->plane[i].band[j][k].a_width = w8 << t;
337  s->plane[i].band[j][k].a_height = h8 << t;
338  }
339  }
340  }
341 
342  /* ll2 and ll1 commented out because they are done in-place */
343  s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
344  s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
345  // s->plane[i].l_h[2] = ll2;
346  s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
347  s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
348  // s->plane[i].l_h[5] = ll1;
349  s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
350  s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
351  if (s->transform_type != 0) {
352  int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;
353 
354  s->plane[i].l_h[8] = frame2;
355  s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
356  }
357  }
358 
359  s->a_height = s->coded_height;
360  s->a_width = s->coded_width;
361  s->a_format = s->coded_format;
362 
363  return 0;
364 }
365 
366 static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
367  AVPacket *avpkt)
368 {
369  CFHDContext *s = avctx->priv_data;
370  CFHDDSPContext *dsp = &s->dsp;
371  GetByteContext gb;
372  ThreadFrame frame = { .f = data };
373  AVFrame *pic = data;
374  int ret = 0, i, j, plane, got_buffer = 0;
375  int16_t *coeff_data;
376 
379 
380  bytestream2_init(&gb, avpkt->data, avpkt->size);
381 
382  while (bytestream2_get_bytes_left(&gb) >= 4) {
383  /* Bit weird but implement the tag parsing as the spec says */
384  uint16_t tagu = bytestream2_get_be16(&gb);
385  int16_t tag = (int16_t)tagu;
386  int8_t tag8 = (int8_t)(tagu >> 8);
387  uint16_t abstag = abs(tag);
388  int8_t abs_tag8 = abs(tag8);
389  uint16_t data = bytestream2_get_be16(&gb);
390  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
391  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
392  } else if (tag == SampleFlags) {
393  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
394  s->progressive = data & 0x0001;
395  } else if (tag == FrameType) {
396  s->frame_type = data;
397  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
398  } else if (abstag == VersionMajor) {
399  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
400  } else if (abstag == VersionMinor) {
401  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
402  } else if (abstag == VersionRevision) {
403  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
404  } else if (abstag == VersionEdit) {
405  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
406  } else if (abstag == Version) {
407  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
408  } else if (tag == ImageWidth) {
409  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
410  s->coded_width = data;
411  } else if (tag == ImageHeight) {
412  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
413  s->coded_height = data;
414  } else if (tag == ChannelCount) {
415  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
416  s->channel_cnt = data;
417  if (data > 4) {
418  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
419  ret = AVERROR_PATCHWELCOME;
420  goto end;
421  }
422  } else if (tag == SubbandCount) {
423  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
424  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
425  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
426  ret = AVERROR_PATCHWELCOME;
427  goto end;
428  }
429  } else if (tag == ChannelNumber) {
430  s->channel_num = data;
431  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
432  if (s->channel_num >= s->planes) {
433  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
434  ret = AVERROR(EINVAL);
435  goto end;
436  }
438  } else if (tag == SubbandNumber) {
439  if (s->subband_num != 0 && data == 1 && (s->transform_type == 0 || s->transform_type == 2)) // hack
440  s->level++;
441  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
442  s->subband_num = data;
443  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
444  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
445  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
446  ret = AVERROR(EINVAL);
447  goto end;
448  }
449  if (s->subband_num > 3) {
450  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
451  ret = AVERROR(EINVAL);
452  goto end;
453  }
454  } else if (tag == SubbandBand) {
455  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
456  if ((s->transform_type == 0 && data >= SUBBAND_COUNT) ||
457  (s->transform_type == 2 && data >= SUBBAND_COUNT_3D && data != 255)) {
458  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
459  ret = AVERROR(EINVAL);
460  goto end;
461  }
462  if (s->transform_type == 0 || s->transform_type == 2)
464  else
465  av_log(avctx, AV_LOG_WARNING, "Ignoring subband num actual %"PRIu16"\n", data);
466  } else if (tag == LowpassPrecision)
467  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
468  else if (tag == Quantization) {
469  s->quantisation = data;
470  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
471  } else if (tag == PrescaleTable) {
472  for (i = 0; i < 8; i++)
473  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
474  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
475  } else if (tag == BandEncoding) {
476  if (!data || data > 5) {
477  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
478  ret = AVERROR(EINVAL);
479  goto end;
480  }
481  s->band_encoding = data;
482  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
483  } else if (tag == LowpassWidth) {
484  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
485  s->plane[s->channel_num].band[0][0].width = data;
486  s->plane[s->channel_num].band[0][0].stride = data;
487  } else if (tag == LowpassHeight) {
488  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
489  s->plane[s->channel_num].band[0][0].height = data;
490  } else if (tag == SampleType) {
491  s->sample_type = data;
492  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
493  } else if (tag == TransformType) {
494  if (data > 2) {
495  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
496  ret = AVERROR(EINVAL);
497  goto end;
498  } else if (data == 1) {
499  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
500  ret = AVERROR_PATCHWELCOME;
501  goto end;
502  }
503  if (s->transform_type == -1) {
504  s->transform_type = data;
505  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
506  } else {
507  av_log(avctx, AV_LOG_DEBUG, "Ignoring additional transform type %"PRIu16"\n", data);
508  }
509  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
510  if (abstag == 0x4001)
511  s->peak.level = 0;
512  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
513  bytestream2_skipu(&gb, data * 4);
514  } else if (tag == FrameIndex) {
515  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
516  s->frame_index = data;
517  } else if (tag == SampleIndexTable) {
518  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
519  if (data > bytestream2_get_bytes_left(&gb) / 4) {
520  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
521  ret = AVERROR_INVALIDDATA;
522  goto end;
523  }
524  for (i = 0; i < data; i++) {
525  uint32_t offset = bytestream2_get_be32(&gb);
526  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
527  }
528  } else if (tag == HighpassWidth) {
529  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
530  if (data < 3) {
531  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
532  ret = AVERROR(EINVAL);
533  goto end;
534  }
535  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
536  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
537  } else if (tag == HighpassHeight) {
538  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
539  if (data < 3) {
540  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
541  ret = AVERROR(EINVAL);
542  goto end;
543  }
544  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
545  } else if (tag == BandWidth) {
546  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
547  if (data < 3) {
548  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
549  ret = AVERROR(EINVAL);
550  goto end;
551  }
552  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
553  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
554  } else if (tag == BandHeight) {
555  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
556  if (data < 3) {
557  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
558  ret = AVERROR(EINVAL);
559  goto end;
560  }
561  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
562  } else if (tag == InputFormat) {
563  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
564  if (s->coded_format == AV_PIX_FMT_NONE ||
566  if (data >= 100 && data <= 105) {
568  } else if (data >= 122 && data <= 128) {
570  } else if (data == 30) {
572  } else {
574  }
576  }
577  } else if (tag == BandCodingFlags) {
578  s->codebook = data & 0xf;
579  s->difference_coding = (data >> 4) & 1;
580  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
581  } else if (tag == Precision) {
582  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
583  if (!(data == 10 || data == 12)) {
584  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
585  ret = AVERROR(EINVAL);
586  goto end;
587  }
588  avctx->bits_per_raw_sample = s->bpc = data;
589  } else if (tag == EncodedFormat) {
590  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
591  if (data == 1) {
593  } else if (data == 2) {
595  } else if (data == 3) {
597  } else if (data == 4) {
599  } else {
600  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
601  ret = AVERROR_PATCHWELCOME;
602  goto end;
603  }
604  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
605  } else if (tag == -DisplayHeight) {
606  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
607  s->cropped_height = data;
608  } else if (tag == -PeakOffsetLow) {
609  s->peak.offset &= ~0xffff;
610  s->peak.offset |= (data & 0xffff);
611  s->peak.base = gb;
612  s->peak.level = 0;
613  } else if (tag == -PeakOffsetHigh) {
614  s->peak.offset &= 0xffff;
615  s->peak.offset |= (data & 0xffffU)<<16;
616  s->peak.base = gb;
617  s->peak.level = 0;
618  } else if (tag == -PeakLevel && s->peak.offset) {
619  s->peak.level = data;
620  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
622  ) {
623  ret = AVERROR_INVALIDDATA;
624  goto end;
625  }
626  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
627  } else
628  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
629 
630  if (tag == BitstreamMarker && data == 0xf0f &&
632  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
633  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
634  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
635 
636  if (s->coded_width) {
637  s->coded_width *= factor;
638  }
639 
640  if (s->coded_height) {
641  s->coded_height *= factor;
642  }
643 
644  if (!s->a_width && !s->coded_width) {
645  s->coded_width = lowpass_width * factor * 8;
646  }
647 
648  if (!s->a_height && !s->coded_height) {
649  s->coded_height = lowpass_height * factor * 8;
650  }
651 
652  if (s->a_width && !s->coded_width)
653  s->coded_width = s->a_width;
654  if (s->a_height && !s->coded_height)
655  s->coded_height = s->a_height;
656 
657  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
658  s->a_format != s->coded_format) {
659  free_buffers(s);
660  if ((ret = alloc_buffers(avctx)) < 0) {
661  free_buffers(s);
662  return ret;
663  }
664  }
665  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
666  if (ret < 0)
667  return ret;
668  if (s->cropped_height) {
669  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
670  if (avctx->height < height)
671  return AVERROR_INVALIDDATA;
672  avctx->height = height;
673  }
674  frame.f->width =
675  frame.f->height = 0;
676 
677  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
678  return ret;
679 
680  s->coded_width = 0;
681  s->coded_height = 0;
683  got_buffer = 1;
684  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
685  frame.f->width =
686  frame.f->height = 0;
687 
688  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
689  return ret;
690  s->coded_width = 0;
691  s->coded_height = 0;
693  got_buffer = 1;
694  }
695 
696  if (s->subband_num_actual == 255)
697  goto finish;
698  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
699 
700  /* Lowpass coefficients */
701  if (tag == BitstreamMarker && data == 0xf0f && s->a_width && s->a_height) {
702  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
703  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
704  int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
705  int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
706 
707  if (lowpass_width < 3 ||
708  lowpass_width > lowpass_a_width) {
709  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
710  ret = AVERROR(EINVAL);
711  goto end;
712  }
713 
714  if (lowpass_height < 3 ||
715  lowpass_height > lowpass_a_height) {
716  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
717  ret = AVERROR(EINVAL);
718  goto end;
719  }
720 
721  if (!got_buffer) {
722  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
723  ret = AVERROR(EINVAL);
724  goto end;
725  }
726 
727  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
728  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
729  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
730  ret = AVERROR(EINVAL);
731  goto end;
732  }
733 
734  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
735  for (i = 0; i < lowpass_height; i++) {
736  for (j = 0; j < lowpass_width; j++)
737  coeff_data[j] = bytestream2_get_be16u(&gb);
738 
739  coeff_data += lowpass_width;
740  }
741 
742  /* Align to mod-4 position to continue reading tags */
743  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
744 
745  /* Copy last line of coefficients if odd height */
746  if (lowpass_height & 1) {
747  memcpy(&coeff_data[lowpass_height * lowpass_width],
748  &coeff_data[(lowpass_height - 1) * lowpass_width],
749  lowpass_width * sizeof(*coeff_data));
750  }
751 
752  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
753  }
754 
755  if ((tag == BandHeader || tag == BandSecondPass) && s->subband_num_actual != 255 && s->a_width && s->a_height) {
756  int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
757  int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
758  int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
759  int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
760  int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
761  int expected;
762  int a_expected = highpass_a_height * highpass_a_width;
763  int level, run, coeff;
764  int count = 0, bytes;
765 
766  if (!got_buffer) {
767  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
768  ret = AVERROR(EINVAL);
769  goto end;
770  }
771 
772  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
773  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
774  ret = AVERROR(EINVAL);
775  goto end;
776  }
777  expected = highpass_height * highpass_stride;
778 
779  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
780 
781  ret = init_get_bits8(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb));
782  if (ret < 0)
783  goto end;
784  {
785  OPEN_READER(re, &s->gb);
786 
787  const int lossless = s->band_encoding == 5;
788 
789  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
790  s->codebook = 1;
791  if (!s->codebook) {
792  while (1) {
793  UPDATE_CACHE(re, &s->gb);
794  GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
795  VLC_BITS, 3, 1);
796 
797  /* escape */
798  if (level == 64)
799  break;
800 
801  count += run;
802 
803  if (count > expected)
804  break;
805 
806  if (!lossless)
807  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
808  else
809  coeff = level;
810  if (tag == BandSecondPass) {
811  const uint16_t q = s->quantisation;
812 
813  for (i = 0; i < run; i++) {
814  *coeff_data |= coeff << 8;
815  *coeff_data++ *= q;
816  }
817  } else {
818  for (i = 0; i < run; i++)
819  *coeff_data++ = coeff;
820  }
821  }
822  } else {
823  while (1) {
824  UPDATE_CACHE(re, &s->gb);
825  GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
826  VLC_BITS, 3, 1);
827 
828  /* escape */
829  if (level == 255 && run == 2)
830  break;
831 
832  count += run;
833 
834  if (count > expected)
835  break;
836 
837  if (!lossless)
838  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
839  else
840  coeff = level;
841  if (tag == BandSecondPass) {
842  const uint16_t q = s->quantisation;
843 
844  for (i = 0; i < run; i++) {
845  *coeff_data |= coeff << 8;
846  *coeff_data++ *= q;
847  }
848  } else {
849  for (i = 0; i < run; i++)
850  *coeff_data++ = coeff;
851  }
852  }
853  }
854  CLOSE_READER(re, &s->gb);
855  }
856 
857  if (count > expected) {
858  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
859  ret = AVERROR(EINVAL);
860  goto end;
861  }
862  if (s->peak.level)
863  peak_table(coeff_data - count, &s->peak, count);
864  if (s->difference_coding)
865  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
866 
867  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
868  if (bytes > bytestream2_get_bytes_left(&gb)) {
869  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
870  ret = AVERROR(EINVAL);
871  goto end;
872  } else
873  bytestream2_seek(&gb, bytes, SEEK_CUR);
874 
875  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
876 finish:
877  if (s->subband_num_actual != 255)
878  s->codebook = 0;
879  }
880  }
881 
883  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
884  s->progressive = 1;
885  s->planes = 4;
886  }
887 
888  ff_thread_finish_setup(avctx);
889 
890  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
892  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
893  ret = AVERROR(EINVAL);
894  goto end;
895  }
896 
897  if (!got_buffer) {
898  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
899  ret = AVERROR(EINVAL);
900  goto end;
901  }
902 
903  if (s->transform_type == 0 && s->sample_type != 1) {
904  for (plane = 0; plane < s->planes && !ret; plane++) {
905  /* level 1 */
906  int lowpass_height = s->plane[plane].band[0][0].height;
907  int output_stride = s->plane[plane].band[0][0].a_width;
908  int lowpass_width = s->plane[plane].band[0][0].width;
909  int highpass_stride = s->plane[plane].band[0][1].stride;
910  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
911  ptrdiff_t dst_linesize;
912  int16_t *low, *high, *output, *dst;
913 
914  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
915  act_plane = 0;
916  dst_linesize = pic->linesize[act_plane];
917  } else {
918  dst_linesize = pic->linesize[act_plane] / 2;
919  }
920 
921  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
922  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
923  lowpass_width < 3 || lowpass_height < 3) {
924  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
925  ret = AVERROR(EINVAL);
926  goto end;
927  }
928 
929  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
930 
931  low = s->plane[plane].subband[0];
932  high = s->plane[plane].subband[2];
933  output = s->plane[plane].l_h[0];
934  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
935 
936  low = s->plane[plane].subband[1];
937  high = s->plane[plane].subband[3];
938  output = s->plane[plane].l_h[1];
939 
940  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
941 
942  low = s->plane[plane].l_h[0];
943  high = s->plane[plane].l_h[1];
944  output = s->plane[plane].subband[0];
945  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
946  if (s->bpc == 12) {
947  output = s->plane[plane].subband[0];
948  for (i = 0; i < lowpass_height * 2; i++) {
949  for (j = 0; j < lowpass_width * 2; j++)
950  output[j] *= 4;
951 
952  output += output_stride * 2;
953  }
954  }
955 
956  /* level 2 */
957  lowpass_height = s->plane[plane].band[1][1].height;
958  output_stride = s->plane[plane].band[1][1].a_width;
959  lowpass_width = s->plane[plane].band[1][1].width;
960  highpass_stride = s->plane[plane].band[1][1].stride;
961 
962  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
963  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
964  lowpass_width < 3 || lowpass_height < 3) {
965  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
966  ret = AVERROR(EINVAL);
967  goto end;
968  }
969 
970  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
971 
972  low = s->plane[plane].subband[0];
973  high = s->plane[plane].subband[5];
974  output = s->plane[plane].l_h[3];
975  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
976 
977  low = s->plane[plane].subband[4];
978  high = s->plane[plane].subband[6];
979  output = s->plane[plane].l_h[4];
980  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
981 
982  low = s->plane[plane].l_h[3];
983  high = s->plane[plane].l_h[4];
984  output = s->plane[plane].subband[0];
985  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
986 
987  output = s->plane[plane].subband[0];
988  for (i = 0; i < lowpass_height * 2; i++) {
989  for (j = 0; j < lowpass_width * 2; j++)
990  output[j] *= 4;
991 
992  output += output_stride * 2;
993  }
994 
995  /* level 3 */
996  lowpass_height = s->plane[plane].band[2][1].height;
997  output_stride = s->plane[plane].band[2][1].a_width;
998  lowpass_width = s->plane[plane].band[2][1].width;
999  highpass_stride = s->plane[plane].band[2][1].stride;
1000 
1001  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
1002  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width ||
1003  lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->plane[plane].width) {
1004  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1005  ret = AVERROR(EINVAL);
1006  goto end;
1007  }
1008 
1009  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1010  if (s->progressive) {
1011  low = s->plane[plane].subband[0];
1012  high = s->plane[plane].subband[8];
1013  output = s->plane[plane].l_h[6];
1014  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1015 
1016  low = s->plane[plane].subband[7];
1017  high = s->plane[plane].subband[9];
1018  output = s->plane[plane].l_h[7];
1019  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1020 
1021  dst = (int16_t *)pic->data[act_plane];
1022  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1023  if (plane & 1)
1024  dst++;
1025  if (plane > 1)
1026  dst += pic->linesize[act_plane] >> 1;
1027  }
1028  low = s->plane[plane].l_h[6];
1029  high = s->plane[plane].l_h[7];
1030 
1031  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1032  (lowpass_height * 2 > avctx->coded_height / 2 ||
1033  lowpass_width * 2 > avctx->coded_width / 2 )
1034  ) {
1035  ret = AVERROR_INVALIDDATA;
1036  goto end;
1037  }
1038 
1039  for (i = 0; i < s->plane[act_plane].height; i++) {
1040  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1041  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1042  process_alpha(dst, lowpass_width * 2);
1043  low += output_stride;
1044  high += output_stride;
1045  dst += dst_linesize;
1046  }
1047  } else {
1048  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
1049  pic->interlaced_frame = 1;
1050  low = s->plane[plane].subband[0];
1051  high = s->plane[plane].subband[7];
1052  output = s->plane[plane].l_h[6];
1053  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1054 
1055  low = s->plane[plane].subband[8];
1056  high = s->plane[plane].subband[9];
1057  output = s->plane[plane].l_h[7];
1058  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1059 
1060  dst = (int16_t *)pic->data[act_plane];
1061  low = s->plane[plane].l_h[6];
1062  high = s->plane[plane].l_h[7];
1063  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1064  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1065  low += output_stride * 2;
1066  high += output_stride * 2;
1067  dst += pic->linesize[act_plane];
1068  }
1069  }
1070  }
1071  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1072  for (plane = 0; plane < s->planes && !ret; plane++) {
1073  int lowpass_height = s->plane[plane].band[0][0].height;
1074  int output_stride = s->plane[plane].band[0][0].a_width;
1075  int lowpass_width = s->plane[plane].band[0][0].width;
1076  int highpass_stride = s->plane[plane].band[0][1].stride;
1077  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1078  int16_t *low, *high, *output, *dst;
1079  ptrdiff_t dst_linesize;
1080 
1081  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1082  act_plane = 0;
1083  dst_linesize = pic->linesize[act_plane];
1084  } else {
1085  dst_linesize = pic->linesize[act_plane] / 2;
1086  }
1087 
1088  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1089  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
1090  lowpass_width < 3 || lowpass_height < 3) {
1091  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1092  ret = AVERROR(EINVAL);
1093  goto end;
1094  }
1095 
1096  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1097 
1098  low = s->plane[plane].subband[0];
1099  high = s->plane[plane].subband[2];
1100  output = s->plane[plane].l_h[0];
1101  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1102 
1103  low = s->plane[plane].subband[1];
1104  high = s->plane[plane].subband[3];
1105  output = s->plane[plane].l_h[1];
1106  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1107 
1108  low = s->plane[plane].l_h[0];
1109  high = s->plane[plane].l_h[1];
1110  output = s->plane[plane].l_h[7];
1111  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1112  if (s->bpc == 12) {
1113  output = s->plane[plane].l_h[7];
1114  for (i = 0; i < lowpass_height * 2; i++) {
1115  for (j = 0; j < lowpass_width * 2; j++)
1116  output[j] *= 4;
1117 
1118  output += output_stride * 2;
1119  }
1120  }
1121 
1122  lowpass_height = s->plane[plane].band[1][1].height;
1123  output_stride = s->plane[plane].band[1][1].a_width;
1124  lowpass_width = s->plane[plane].band[1][1].width;
1125  highpass_stride = s->plane[plane].band[1][1].stride;
1126 
1127  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1128  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1129  lowpass_width < 3 || lowpass_height < 3) {
1130  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1131  ret = AVERROR(EINVAL);
1132  goto end;
1133  }
1134 
1135  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1136 
1137  low = s->plane[plane].l_h[7];
1138  high = s->plane[plane].subband[5];
1139  output = s->plane[plane].l_h[3];
1140  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1141 
1142  low = s->plane[plane].subband[4];
1143  high = s->plane[plane].subband[6];
1144  output = s->plane[plane].l_h[4];
1145  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1146 
1147  low = s->plane[plane].l_h[3];
1148  high = s->plane[plane].l_h[4];
1149  output = s->plane[plane].l_h[7];
1150  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1151 
1152  output = s->plane[plane].l_h[7];
1153  for (i = 0; i < lowpass_height * 2; i++) {
1154  for (j = 0; j < lowpass_width * 2; j++)
1155  output[j] *= 4;
1156  output += output_stride * 2;
1157  }
1158 
1159  low = s->plane[plane].subband[7];
1160  high = s->plane[plane].subband[9];
1161  output = s->plane[plane].l_h[3];
1162  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1163 
1164  low = s->plane[plane].subband[8];
1165  high = s->plane[plane].subband[10];
1166  output = s->plane[plane].l_h[4];
1167  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1168 
1169  low = s->plane[plane].l_h[3];
1170  high = s->plane[plane].l_h[4];
1171  output = s->plane[plane].l_h[9];
1172  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1173 
1174  lowpass_height = s->plane[plane].band[4][1].height;
1175  output_stride = s->plane[plane].band[4][1].a_width;
1176  lowpass_width = s->plane[plane].band[4][1].width;
1177  highpass_stride = s->plane[plane].band[4][1].stride;
1178  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1179 
1180  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1181  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1182  lowpass_width < 3 || lowpass_height < 3) {
1183  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1184  ret = AVERROR(EINVAL);
1185  goto end;
1186  }
1187 
1188  low = s->plane[plane].l_h[7];
1189  high = s->plane[plane].l_h[9];
1190  output = s->plane[plane].l_h[7];
1191  for (i = 0; i < lowpass_height; i++) {
1192  inverse_temporal_filter(low, high, lowpass_width);
1193  low += output_stride;
1194  high += output_stride;
1195  }
1196  if (s->progressive) {
1197  low = s->plane[plane].l_h[7];
1198  high = s->plane[plane].subband[15];
1199  output = s->plane[plane].l_h[6];
1200  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1201 
1202  low = s->plane[plane].subband[14];
1203  high = s->plane[plane].subband[16];
1204  output = s->plane[plane].l_h[7];
1205  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1206 
1207  low = s->plane[plane].l_h[9];
1208  high = s->plane[plane].subband[12];
1209  output = s->plane[plane].l_h[8];
1210  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1211 
1212  low = s->plane[plane].subband[11];
1213  high = s->plane[plane].subband[13];
1214  output = s->plane[plane].l_h[9];
1215  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1216 
1217  if (s->sample_type == 1)
1218  continue;
1219 
1220  dst = (int16_t *)pic->data[act_plane];
1221  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1222  if (plane & 1)
1223  dst++;
1224  if (plane > 1)
1225  dst += pic->linesize[act_plane] >> 1;
1226  }
1227 
1228  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1229  (lowpass_height * 2 > avctx->coded_height / 2 ||
1230  lowpass_width * 2 > avctx->coded_width / 2 )
1231  ) {
1232  ret = AVERROR_INVALIDDATA;
1233  goto end;
1234  }
1235 
1236  low = s->plane[plane].l_h[6];
1237  high = s->plane[plane].l_h[7];
1238  for (i = 0; i < s->plane[act_plane].height; i++) {
1239  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1240  low += output_stride;
1241  high += output_stride;
1242  dst += dst_linesize;
1243  }
1244  } else {
1245  pic->interlaced_frame = 1;
1246  low = s->plane[plane].l_h[7];
1247  high = s->plane[plane].subband[14];
1248  output = s->plane[plane].l_h[6];
1249  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1250 
1251  low = s->plane[plane].subband[15];
1252  high = s->plane[plane].subband[16];
1253  output = s->plane[plane].l_h[7];
1254  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1255 
1256  low = s->plane[plane].l_h[9];
1257  high = s->plane[plane].subband[11];
1258  output = s->plane[plane].l_h[8];
1259  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1260 
1261  low = s->plane[plane].subband[12];
1262  high = s->plane[plane].subband[13];
1263  output = s->plane[plane].l_h[9];
1264  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1265 
1266  if (s->sample_type == 1)
1267  continue;
1268 
1269  dst = (int16_t *)pic->data[act_plane];
1270  low = s->plane[plane].l_h[6];
1271  high = s->plane[plane].l_h[7];
1272  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1273  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1274  low += output_stride * 2;
1275  high += output_stride * 2;
1276  dst += pic->linesize[act_plane];
1277  }
1278  }
1279  }
1280  }
1281 
1282  if (s->transform_type == 2 && s->sample_type == 1) {
1283  int16_t *low, *high, *dst;
1284  int output_stride, lowpass_height, lowpass_width;
1285  ptrdiff_t dst_linesize;
1286 
1287  for (plane = 0; plane < s->planes; plane++) {
1288  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1289 
1290  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1291  act_plane = 0;
1292  dst_linesize = pic->linesize[act_plane];
1293  } else {
1294  dst_linesize = pic->linesize[act_plane] / 2;
1295  }
1296 
1297  lowpass_height = s->plane[plane].band[4][1].height;
1298  output_stride = s->plane[plane].band[4][1].a_width;
1299  lowpass_width = s->plane[plane].band[4][1].width;
1300 
1301  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1302  s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1303  lowpass_width < 3 || lowpass_height < 3) {
1304  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1305  ret = AVERROR(EINVAL);
1306  goto end;
1307  }
1308 
1309  if (s->progressive) {
1310  dst = (int16_t *)pic->data[act_plane];
1311  low = s->plane[plane].l_h[8];
1312  high = s->plane[plane].l_h[9];
1313 
1314  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1315  if (plane & 1)
1316  dst++;
1317  if (plane > 1)
1318  dst += pic->linesize[act_plane] >> 1;
1319  }
1320 
1321  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1322  (lowpass_height * 2 > avctx->coded_height / 2 ||
1323  lowpass_width * 2 > avctx->coded_width / 2 )
1324  ) {
1325  ret = AVERROR_INVALIDDATA;
1326  goto end;
1327  }
1328 
1329  for (i = 0; i < s->plane[act_plane].height; i++) {
1330  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1331  low += output_stride;
1332  high += output_stride;
1333  dst += dst_linesize;
1334  }
1335  } else {
1336  dst = (int16_t *)pic->data[act_plane];
1337  low = s->plane[plane].l_h[8];
1338  high = s->plane[plane].l_h[9];
1339  for (i = 0; i < s->plane[act_plane].height / 2; i++) {
1340  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1341  low += output_stride * 2;
1342  high += output_stride * 2;
1343  dst += pic->linesize[act_plane];
1344  }
1345  }
1346  }
1347  }
1348 
1349  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1350  process_bayer(pic, s->bpc);
1351 end:
1352  if (ret < 0)
1353  return ret;
1354 
1355  *got_frame = 1;
1356  return avpkt->size;
1357 }
1358 
1360 {
1361  CFHDContext *s = avctx->priv_data;
1362 
1363  free_buffers(s);
1364 
1365  ff_free_vlc(&s->vlc_9);
1366  ff_free_vlc(&s->vlc_18);
1367 
1368  return 0;
1369 }
1370 
1371 #if HAVE_THREADS
1372 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1373 {
1374  CFHDContext *psrc = src->priv_data;
1375  CFHDContext *pdst = dst->priv_data;
1376  int ret;
1377 
1378  if (dst == src || psrc->transform_type == 0)
1379  return 0;
1380 
1381  if (pdst->plane[0].idwt_size != psrc->plane[0].idwt_size ||
1382  pdst->a_format != psrc->a_format ||
1383  pdst->a_width != psrc->a_width ||
1384  pdst->a_height != psrc->a_height)
1385  free_buffers(pdst);
1386 
1387  pdst->a_format = psrc->a_format;
1388  pdst->a_width = psrc->a_width;
1389  pdst->a_height = psrc->a_height;
1390  pdst->transform_type = psrc->transform_type;
1391  pdst->progressive = psrc->progressive;
1392  pdst->planes = psrc->planes;
1393 
1394  if (!pdst->plane[0].idwt_buf) {
1395  pdst->coded_width = pdst->a_width;
1396  pdst->coded_height = pdst->a_height;
1397  pdst->coded_format = pdst->a_format;
1398  ret = alloc_buffers(dst);
1399  if (ret < 0)
1400  return ret;
1401  }
1402 
1403  for (int plane = 0; plane < pdst->planes; plane++) {
1404  memcpy(pdst->plane[plane].band, psrc->plane[plane].band, sizeof(pdst->plane[plane].band));
1405  memcpy(pdst->plane[plane].idwt_buf, psrc->plane[plane].idwt_buf,
1406  pdst->plane[plane].idwt_size * sizeof(int16_t));
1407  }
1408 
1409  return 0;
1410 }
1411 #endif
1412 
1414  .name = "cfhd",
1415  .long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
1416  .type = AVMEDIA_TYPE_VIDEO,
1417  .id = AV_CODEC_ID_CFHD,
1418  .priv_data_size = sizeof(CFHDContext),
1419  .init = cfhd_init,
1420  .close = cfhd_close,
1421  .decode = cfhd_decode,
1423  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1425 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
int channel_cnt
Definition: cfhd.h:170
#define NULL
Definition: coverity.c:32
int difference_coding
Definition: cfhd.h:178
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:366
static const unsigned codebook[256][2]
Definition: cfhdenc.c:42
VLC vlc_18
Definition: cfhd.h:148
#define av_clip_uintp2
Definition: common.h:146
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
AVCodecContext * avctx
Definition: cfhd.h:142
CFHDDSPContext dsp
Definition: cfhd.h:187
Definition: cfhd.h:86
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
TransformType
Definition: webp.c:110
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:719
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:129
float re
Definition: fft.c:82
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AVFrame * f
Definition: thread.h:35
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
const char * g
Definition: vf_curves.c:117
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
Definition: cfhd.h:89
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:41
int a_height
Definition: cfhd.h:115
int level
Definition: cfhd.h:136
Definition: cfhd.h:72
int size
Definition: packet.h:364
int transform_type
Definition: cfhd.h:158
int cropped_height
Definition: cfhd.h:161
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
int16_t * idwt_tmp
Definition: cfhd.h:125
#define FF_ARRAY_ELEMS(a)
int a_width
Definition: cfhd.h:165
ptrdiff_t stride
Definition: cfhd.h:112
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
uint8_t run
Definition: svq3.c:205
int subband_num_actual
Definition: cfhd.h:181
#define VLC_BITS
Definition: cfhd.h:98
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:208
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1742
int sample_type
Definition: cfhd.h:157
AVCodec.
Definition: codec.h:190
static const struct @322 planes[]
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:195
Definition: cfhd.h:83
Macro definitions for various function/variable attributes.
int width
Definition: cfhd.h:114
int16_t * subband[SUBBAND_COUNT_3D]
Definition: cfhd.h:129
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
#define av_cold
Definition: attributes.h:88
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
AVOptions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Multithreading support functions.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int16_t * idwt_buf
Definition: cfhd.h:124
int a_format
Definition: cfhd.h:167
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:238
#define height
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1597
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:461
#define FFALIGN(x, a)
Definition: macros.h:48
#define SUBBAND_COUNT
Definition: cfhd.h:99
#define av_log(a,...)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
int16_t * l_h[10]
Definition: cfhd.h:130
CFHD_RL_VLC_ELEM table_18_rl_vlc[4572]
Definition: cfhd.h:147
CFHD_RL_VLC_ELEM table_9_rl_vlc[2088]
Definition: cfhd.h:144
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:255
FrameType
G723.1 frame types.
Definition: g723_1.h:63
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define R
Definition: huffyuvdsp.h:34
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:132
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:126
VLC vlc_9
Definition: cfhd.h:145
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
#define B
Definition: huffyuvdsp.h:32
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:115
const char * r
Definition: vf_curves.c:116
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
#define DWT_LEVELS
Definition: cfhd.h:108
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
GLsizei count
Definition: opengl_enc.c:108
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
#define GET_RL_VLC(level, run, name, gb, table, bits,max_depth, need_update)
Definition: get_bits.h:738
Definition: cfhd.h:135
int a_width
Definition: cfhd.h:113
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1359
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:154
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:87
int32_t SampleType
Definition: ac3enc.h:63
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
#define b
Definition: input.c:41
GetByteContext base
Definition: cfhd.h:138
int subband_cnt
Definition: cfhd.h:171
#define width
#define FFSIGN(a)
Definition: common.h:73
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:73
uint8_t prescale_table[8]
Definition: cfhd.h:183
uint16_t quantisation
Definition: cfhd.h:175
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int frame_type
Definition: cfhd.h:155
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:43
int channel_num
Definition: cfhd.h:173
if(ret)
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:151
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
#define DWT_LEVELS_3D
Definition: cfhd.h:109
GetBitContext gb
Definition: cfhd.h:152
Libavcodec external API header.
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
static const int16_t alpha[]
Definition: ilbcdata.h:55
main external API structure.
Definition: avcodec.h:531
int codebook
Definition: cfhd.h:177
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:106
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:117
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
int planes
Definition: cfhd.h:154
int idwt_size
Definition: cfhd.h:126
int subband_num
Definition: cfhd.h:179
int coded_height
Definition: avcodec.h:719
enum AVPixelFormat coded_format
Definition: cfhd.h:162
AVCodec ff_cfhd_decoder
Definition: cfhd.c:1413
refcounted data buffer API
static const int factor[16]
Definition: vf_pp7.c:77
Peak peak
Definition: cfhd.h:185
int band_encoding
Definition: cfhd.h:172
int level
Definition: cfhd.h:180
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:109
int frame_index
Definition: cfhd.h:156
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
uint8_t level
Definition: svq3.c:206
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:424
int coded_width
Definition: cfhd.h:159
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
common internal and external API header
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:137
ptrdiff_t stride
Definition: cfhd.h:122
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
int bpc
Definition: cfhd.h:169
void * priv_data
Definition: avcodec.h:558
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
int a_height
Definition: cfhd.h:166
#define SUBBAND_COUNT_3D
Definition: cfhd.h:100
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:80
int height
Definition: cfhd.h:116
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
int width
Definition: cfhd.h:120
int offset
Definition: cfhd.h:137
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:219
int height
Definition: frame.h:372
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:276
int progressive
Definition: cfhd.h:163
#define av_freep(p)
#define av_malloc_array(a, b)
int lut[2][256]
Definition: cfhd.h:150
#define stride
Plane plane[4]
Definition: cfhd.h:184
int height
Definition: cfhd.h:121
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t lowpass_precision
Definition: cfhd.h:174
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
int i
Definition: input.c:407
int coded_height
Definition: cfhd.h:160
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
Tag MUST be even
Definition: snow.txt:206