exr.c
1 /*
2  * OpenEXR (.exr) image decoder
3  * Copyright (c) 2006 Industrial Light & Magic, a division of Lucas Digital Ltd. LLC
4  * Copyright (c) 2009 Jimmy Christensen
5  *
6  * B44/B44A, Tile and UINT32 support added by Jokyo Images, supported by CNC - French National Center for Cinema
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * OpenEXR decoder
28  * @author Jimmy Christensen
29  *
30  * For more information on the OpenEXR format, visit:
31  * http://openexr.com/
32  *
33  * exr_half2float() is credited to Aaftab Munshi, Dan Ginsburg, Dave Shreiner.
34  */
35 
36 #include <float.h>
37 #include <zlib.h>
38 
39 #include "libavutil/avassert.h"
40 #include "libavutil/common.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/intfloat.h"
43 #include "libavutil/avstring.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/color_utils.h"
46 
47 #include "avcodec.h"
48 #include "bytestream.h"
49 
50 #if HAVE_BIGENDIAN
51 #include "bswapdsp.h"
52 #endif
53 
54 #include "exrdsp.h"
55 #include "get_bits.h"
56 #include "internal.h"
57 #include "mathops.h"
58 #include "thread.h"
59 
60 enum ExrCompr {
61  EXR_RAW,
62  EXR_RLE,
63  EXR_ZIP1,
64  EXR_ZIP16,
65  EXR_PIZ,
66  EXR_PXR24,
67  EXR_B44,
68  EXR_B44A,
69  EXR_DWA,
70  EXR_DWB,
71  EXR_UNKN,
72 };
73 
74 enum ExrPixelType {
75  EXR_UINT,
76  EXR_HALF,
77  EXR_FLOAT,
78  EXR_UNKNOWN,
79 };
80 
81 enum ExrTileLevelMode {
82  EXR_TILE_LEVEL_ONE,
83  EXR_TILE_LEVEL_MIPMAP,
84  EXR_TILE_LEVEL_RIPMAP,
85  EXR_TILE_LEVEL_UNKNOWN,
86 };
87 
88 enum ExrTileLevelRound {
89  EXR_TILE_ROUND_UP,
90  EXR_TILE_ROUND_DOWN,
91  EXR_TILE_ROUND_UNKNOWN,
92 };
93 
94 typedef struct EXRChannel {
95  int xsub, ysub;
96  enum ExrPixelType pixel_type;
97 } EXRChannel;
98 
99 typedef struct EXRTileAttribute {
100  int32_t xSize;
101  int32_t ySize;
102  enum ExrTileLevelMode level_mode;
103  enum ExrTileLevelRound level_round;
104 } EXRTileAttribute;
105 
106 typedef struct EXRThreadData {
107  uint8_t *uncompressed_data;
108  int uncompressed_size;
109 
110  uint8_t *tmp;
111  int tmp_size;
112 
113  uint8_t *bitmap;
114  uint16_t *lut;
115 
116  int ysize, xsize;
117 
118  int channel_line_size;
119 } EXRThreadData;
120 
121 typedef struct EXRContext {
122  AVClass *class;
123  AVFrame *picture;
124  AVCodecContext *avctx;
125  ExrDSPContext dsp;
126 
127 #if HAVE_BIGENDIAN
128  BswapDSPContext bbdsp;
129 #endif
130 
131  enum ExrCompr compression;
132  enum ExrPixelType pixel_type;
133  int channel_offsets[4]; // 0 = red, 1 = green, 2 = blue and 3 = alpha
134  const AVPixFmtDescriptor *desc;
135 
136  int w, h;
137  int32_t xmax, xmin;
138  int32_t ymax, ymin;
139  uint32_t xdelta, ydelta;
140 
141  int scan_lines_per_block;
142 
143  EXRTileAttribute tile_attr; /* header data attribute of tile */
144  int is_tile; /* 0 if scanline, 1 if tile */
145 
146  int is_luma;/* 1 if there is a Y plane */
147 
148  GetByteContext gb;
149  const uint8_t *buf;
150  int buf_size;
151 
152  EXRChannel *channels;
153  int nb_channels;
154  int current_channel_offset;
155 
156  EXRThreadData *thread_data;
157 
158  const char *layer;
159 
160  enum AVColorTransferCharacteristic apply_trc_type;
161  float gamma;
162  union av_intfloat32 gamma_table[65536];
163 } EXRContext;
164 
165 /* -15 stored using a single precision bias of 127 */
166 #define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP 0x38000000
167 
168 /* max exponent value in single precision that will be converted
169  * to Inf or Nan when stored as a half-float */
170 #define HALF_FLOAT_MAX_BIASED_EXP_AS_SINGLE_FP_EXP 0x47800000
171 
172 /* 255 is the max exponent biased value */
173 #define FLOAT_MAX_BIASED_EXP (0xFF << 23)
174 
175 #define HALF_FLOAT_MAX_BIASED_EXP (0x1F << 10)
176 
177 /**
178  * Convert a half float as a uint16_t into a full float.
179  *
180  * @param hf half float as uint16_t
181  *
182  * @return float value
183  */
184 static union av_intfloat32 exr_half2float(uint16_t hf)
185 {
186  unsigned int sign = (unsigned int) (hf >> 15);
187  unsigned int mantissa = (unsigned int) (hf & ((1 << 10) - 1));
188  unsigned int exp = (unsigned int) (hf & HALF_FLOAT_MAX_BIASED_EXP);
189  union av_intfloat32 f;
190 
191  if (exp == HALF_FLOAT_MAX_BIASED_EXP) {
192  // we have a half-float NaN or Inf
193  // half-float NaNs will be converted to a single precision NaN
194  // half-float Infs will be converted to a single precision Inf
195  exp = FLOAT_MAX_BIASED_EXP;
196  if (mantissa)
197  mantissa = (1 << 23) - 1; // set all bits to indicate a NaN
198  } else if (exp == 0x0) {
199  // convert half-float zero/denorm to single precision value
200  if (mantissa) {
201  mantissa <<= 1;
202  exp = HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP;
203  // check for leading 1 in denorm mantissa
204  while (!(mantissa & (1 << 10))) {
205  // for every leading 0, decrement single precision exponent by 1
206  // and shift half-float mantissa value to the left
207  mantissa <<= 1;
208  exp -= (1 << 23);
209  }
210  // clamp the mantissa to 10 bits
211  mantissa &= ((1 << 10) - 1);
212  // shift left to generate single-precision mantissa of 23 bits
213  mantissa <<= 13;
214  }
215  } else {
216  // shift left to generate single-precision mantissa of 23 bits
217  mantissa <<= 13;
218  // generate single precision biased exponent value
219  exp = (exp << 13) + HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP;
220  }
221 
222  f.i = (sign << 31) | exp | mantissa;
223 
224  return f;
225 }
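A few spot checks make the conversion above easier to follow. This is an illustrative sketch only (the helper name is made up, and it is not part of exr.c); it relies on av_assert0() from libavutil/avassert.h, which the file already includes.

/* Illustrative sketch: well-known binary16 patterns and what exr_half2float()
 * produces for them. */
static void exr_half2float_selftest(void)
{
    av_assert0(exr_half2float(0x3C00).f ==  1.0f);      /* exponent 15, mantissa 0 */
    av_assert0(exr_half2float(0xC000).f == -2.0f);      /* sign set, exponent 16   */
    av_assert0(exr_half2float(0x7BFF).f == 65504.0f);   /* largest finite half     */
    av_assert0(exr_half2float(0x7C00).i == 0x7F800000); /* +Inf bit pattern        */
}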
226 
227 static int zip_uncompress(EXRContext *s, const uint8_t *src, int compressed_size,
228  int uncompressed_size, EXRThreadData *td)
229 {
230  unsigned long dest_len = uncompressed_size;
231 
232  if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK ||
233  dest_len != uncompressed_size)
234  return AVERROR_INVALIDDATA;
235 
236  av_assert1(uncompressed_size % 2 == 0);
237 
238  s->dsp.predictor(td->tmp, uncompressed_size);
239  s->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size);
240 
241  return 0;
242 }
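The two exrdsp hooks called above are not shown in this file. The following is a rough scalar sketch of what they do, per the OpenEXR ZIP/RLE post-processing scheme; it is illustrative only, the function names are made up, and the real implementations (including SIMD variants) live in exrdsp.c.

/* Undo the byte-wise delta coding: each byte is stored as (cur - prev + 128). */
static void predictor_ref(uint8_t *data, int size)
{
    for (int i = 1; i < size; i++)
        data[i] = data[i - 1] + data[i] - 128;
}

/* Re-interleave: the first half of the input supplies the even output bytes,
 * the second half the odd ones (size is asserted even by the callers). */
static void reorder_pixels_ref(uint8_t *dst, const uint8_t *src, int size)
{
    const uint8_t *t1 = src, *t2 = src + size / 2;
    for (int i = 0; i < size / 2; i++) {
        *dst++ = *t1++;
        *dst++ = *t2++;
    }
}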
243 
244 static int rle_uncompress(EXRContext *ctx, const uint8_t *src, int compressed_size,
245  int uncompressed_size, EXRThreadData *td)
246 {
247  uint8_t *d = td->tmp;
248  const int8_t *s = src;
249  int ssize = compressed_size;
250  int dsize = uncompressed_size;
251  uint8_t *dend = d + dsize;
252  int count;
253 
254  while (ssize > 0) {
255  count = *s++;
256 
257  if (count < 0) {
258  count = -count;
259 
260  if ((dsize -= count) < 0 ||
261  (ssize -= count + 1) < 0)
262  return AVERROR_INVALIDDATA;
263 
264  while (count--)
265  *d++ = *s++;
266  } else {
267  count++;
268 
269  if ((dsize -= count) < 0 ||
270  (ssize -= 2) < 0)
271  return AVERROR_INVALIDDATA;
272 
273  while (count--)
274  *d++ = *s;
275 
276  s++;
277  }
278  }
279 
280  if (dend != d)
281  return AVERROR_INVALIDDATA;
282 
283  av_assert1(uncompressed_size % 2 == 0);
284 
285  ctx->dsp.predictor(td->tmp, uncompressed_size);
286  ctx->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size);
287 
288  return 0;
289 }
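A short worked example of the run-length scheme decoded above, written as a note rather than part of the file:

/* Illustrative note: the count byte is signed.  count < 0 means "copy -count
 * literal bytes", count >= 0 means "repeat the next byte count + 1 times".
 * For the compressed bytes
 *
 *   0xFD 'a' 'b' 'c'   ->  count = -3: copy the 3 literals "abc"
 *   0x02 'x'           ->  count =  2: repeat 'x' 2 + 1 = 3 times
 *
 * the output is "abcxxx".  The same predictor + reorder_pixels post-processing
 * as in zip_uncompress() is then applied. */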
290 
291 #define USHORT_RANGE (1 << 16)
292 #define BITMAP_SIZE (1 << 13)
293 
294 static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut)
295 {
296  int i, k = 0;
297 
298  for (i = 0; i < USHORT_RANGE; i++)
299  if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
300  lut[k++] = i;
301 
302  i = k - 1;
303 
304  memset(lut + k, 0, (USHORT_RANGE - k) * 2);
305 
306  return i;
307 }
308 
309 static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize)
310 {
311  int i;
312 
313  for (i = 0; i < dsize; ++i)
314  dst[i] = lut[dst[i]];
315 }
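A small worked example of the PIZ look-up tables built above, as an illustrative note:

/* Illustrative note: reverse_lut() turns the bitmap of used 16-bit values into
 * a dense forward table.  If only the values 0x0005 and 0x0007 have their bits
 * set, then
 *
 *   lut[0] = 0x0000, lut[1] = 0x0005, lut[2] = 0x0007, lut[3..] = 0
 *
 * and the returned maximum index is 2.  apply_lut() later maps every decoded
 * symbol (0, 1, 2, ...) back to its original 16-bit value. */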
316 
317 #define HUF_ENCBITS 16 // literal (value) bit length
318 #define HUF_DECBITS 14 // decoding bit size (>= 8)
319 
320 #define HUF_ENCSIZE ((1 << HUF_ENCBITS) + 1) // encoding table size
321 #define HUF_DECSIZE (1 << HUF_DECBITS) // decoding table size
322 #define HUF_DECMASK (HUF_DECSIZE - 1)
323 
324 typedef struct HufDec {
325  int len;
326  int lit;
327  int *p;
328 } HufDec;
329 
330 static void huf_canonical_code_table(uint64_t *hcode)
331 {
332  uint64_t c, n[59] = { 0 };
333  int i;
334 
335  for (i = 0; i < HUF_ENCSIZE; ++i)
336  n[hcode[i]] += 1;
337 
338  c = 0;
339  for (i = 58; i > 0; --i) {
340  uint64_t nc = ((c + n[i]) >> 1);
341  n[i] = c;
342  c = nc;
343  }
344 
345  for (i = 0; i < HUF_ENCSIZE; ++i) {
346  int l = hcode[i];
347 
348  if (l > 0)
349  hcode[i] = l | (n[l]++ << 6);
350  }
351 }
352 
353 #define SHORT_ZEROCODE_RUN 59
354 #define LONG_ZEROCODE_RUN 63
355 #define SHORTEST_LONG_RUN (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN)
356 #define LONGEST_LONG_RUN (255 + SHORTEST_LONG_RUN)
357 
358 static int huf_unpack_enc_table(GetByteContext *gb,
359  int32_t im, int32_t iM, uint64_t *hcode)
360 {
361  GetBitContext gbit;
362  int ret = init_get_bits8(&gbit, gb->buffer, bytestream2_get_bytes_left(gb));
363  if (ret < 0)
364  return ret;
365 
366  for (; im <= iM; im++) {
367  uint64_t l = hcode[im] = get_bits(&gbit, 6);
368 
369  if (l == LONG_ZEROCODE_RUN) {
370  int zerun = get_bits(&gbit, 8) + SHORTEST_LONG_RUN;
371 
372  if (im + zerun > iM + 1)
373  return AVERROR_INVALIDDATA;
374 
375  while (zerun--)
376  hcode[im++] = 0;
377 
378  im--;
379  } else if (l >= SHORT_ZEROCODE_RUN) {
380  int zerun = l - SHORT_ZEROCODE_RUN + 2;
381 
382  if (im + zerun > iM + 1)
383  return AVERROR_INVALIDDATA;
384 
385  while (zerun--)
386  hcode[im++] = 0;
387 
388  im--;
389  }
390  }
391 
392  bytestream2_skip(gb, (get_bits_count(&gbit) + 7) / 8);
394 
395  return 0;
396 }
397 
398 static int huf_build_dec_table(const uint64_t *hcode, int im,
399  int iM, HufDec *hdecod)
400 {
401  for (; im <= iM; im++) {
402  uint64_t c = hcode[im] >> 6;
403  int i, l = hcode[im] & 63;
404 
405  if (c >> l)
406  return AVERROR_INVALIDDATA;
407 
408  if (l > HUF_DECBITS) {
409  HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
410  if (pl->len)
411  return AVERROR_INVALIDDATA;
412 
413  pl->lit++;
414 
415  pl->p = av_realloc(pl->p, pl->lit * sizeof(int));
416  if (!pl->p)
417  return AVERROR(ENOMEM);
418 
419  pl->p[pl->lit - 1] = im;
420  } else if (l) {
421  HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
422 
423  for (i = 1 << (HUF_DECBITS - l); i > 0; i--, pl++) {
424  if (pl->len || pl->p)
425  return AVERROR_INVALIDDATA;
426  pl->len = l;
427  pl->lit = im;
428  }
429  }
430  }
431 
432  return 0;
433 }
434 
435 #define get_char(c, lc, gb) \
436 { \
437  c = (c << 8) | bytestream2_get_byte(gb); \
438  lc += 8; \
439 }
440 
441 #define get_code(po, rlc, c, lc, gb, out, oe, outb) \
442 { \
443  if (po == rlc) { \
444  if (lc < 8) \
445  get_char(c, lc, gb); \
446  lc -= 8; \
447  \
448  cs = c >> lc; \
449  \
450  if (out + cs > oe || out == outb) \
451  return AVERROR_INVALIDDATA; \
452  \
453  s = out[-1]; \
454  \
455  while (cs-- > 0) \
456  *out++ = s; \
457  } else if (out < oe) { \
458  *out++ = po; \
459  } else { \
460  return AVERROR_INVALIDDATA; \
461  } \
462 }
463 
464 static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
465  GetByteContext *gb, int nbits,
466  int rlc, int no, uint16_t *out)
467 {
468  uint64_t c = 0;
469  uint16_t *outb = out;
470  uint16_t *oe = out + no;
471  const uint8_t *ie = gb->buffer + (nbits + 7) / 8; // input byte size
472  uint8_t cs;
473  uint16_t s;
474  int i, lc = 0;
475 
476  while (gb->buffer < ie) {
477  get_char(c, lc, gb);
478 
479  while (lc >= HUF_DECBITS) {
480  const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
481 
482  if (pl.len) {
483  lc -= pl.len;
484  get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
485  } else {
486  int j;
487 
488  if (!pl.p)
489  return AVERROR_INVALIDDATA;
490 
491  for (j = 0; j < pl.lit; j++) {
492  int l = hcode[pl.p[j]] & 63;
493 
494  while (lc < l && bytestream2_get_bytes_left(gb) > 0)
495  get_char(c, lc, gb);
496 
497  if (lc >= l) {
498  if ((hcode[pl.p[j]] >> 6) ==
499  ((c >> (lc - l)) & ((1LL << l) - 1))) {
500  lc -= l;
501  get_code(pl.p[j], rlc, c, lc, gb, out, oe, outb);
502  break;
503  }
504  }
505  }
506 
507  if (j == pl.lit)
508  return AVERROR_INVALIDDATA;
509  }
510  }
511  }
512 
513  i = (8 - nbits) & 7;
514  c >>= i;
515  lc -= i;
516 
517  while (lc > 0) {
518  const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
519 
520  if (pl.len && lc >= pl.len) {
521  lc -= pl.len;
522  get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
523  } else {
524  return AVERROR_INVALIDDATA;
525  }
526  }
527 
528  if (out - outb != no)
529  return AVERROR_INVALIDDATA;
530  return 0;
531 }
532 
533 static int huf_uncompress(GetByteContext *gb,
534  uint16_t *dst, int dst_size)
535 {
536  int32_t src_size, im, iM;
537  uint32_t nBits;
538  uint64_t *freq;
539  HufDec *hdec;
540  int ret, i;
541 
542  src_size = bytestream2_get_le32(gb);
543  im = bytestream2_get_le32(gb);
544  iM = bytestream2_get_le32(gb);
545  bytestream2_skip(gb, 4);
546  nBits = bytestream2_get_le32(gb);
547  if (im < 0 || im >= HUF_ENCSIZE ||
548  iM < 0 || iM >= HUF_ENCSIZE ||
549  src_size < 0)
550  return AVERROR_INVALIDDATA;
551 
552  bytestream2_skip(gb, 4);
553 
554  freq = av_mallocz_array(HUF_ENCSIZE, sizeof(*freq));
555  hdec = av_mallocz_array(HUF_DECSIZE, sizeof(*hdec));
556  if (!freq || !hdec) {
557  ret = AVERROR(ENOMEM);
558  goto fail;
559  }
560 
561  if ((ret = huf_unpack_enc_table(gb, im, iM, freq)) < 0)
562  goto fail;
563 
564  if (nBits > 8 * bytestream2_get_bytes_left(gb)) {
565  ret = AVERROR_INVALIDDATA;
566  goto fail;
567  }
568 
569  if ((ret = huf_build_dec_table(freq, im, iM, hdec)) < 0)
570  goto fail;
571  ret = huf_decode(freq, hdec, gb, nBits, iM, dst_size, dst);
572 
573 fail:
574  for (i = 0; i < HUF_DECSIZE; i++)
575  if (hdec)
576  av_freep(&hdec[i].p);
577 
578  av_free(freq);
579  av_free(hdec);
580 
581  return ret;
582 }
583 
584 static inline void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
585 {
586  int16_t ls = l;
587  int16_t hs = h;
588  int hi = hs;
589  int ai = ls + (hi & 1) + (hi >> 1);
590  int16_t as = ai;
591  int16_t bs = ai - hi;
592 
593  *a = as;
594  *b = bs;
595 }
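A worked example of the 14-bit wavelet step, as an illustrative note (the encoder formula quoted here is from the reference OpenEXR implementation, not from this file):

/* Illustrative note: wdec14() undoes the encoder step that stores roughly
 * l = (a + b) >> 1 and h = a - b.  For a = 5, b = 2 the encoder produces
 * l = 3, h = 3; feeding those back in:
 *
 *   ai = 3 + (3 & 1) + (3 >> 1) = 5   ->  *a = 5
 *   bs = 5 - 3                  = 2   ->  *b = 2
 *
 * which recovers the original pair exactly. */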
596 
597 #define NBITS 16
598 #define A_OFFSET (1 << (NBITS - 1))
599 #define MOD_MASK ((1 << NBITS) - 1)
600 
601 static inline void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
602 {
603  int m = l;
604  int d = h;
605  int bb = (m - (d >> 1)) & MOD_MASK;
606  int aa = (d + bb - A_OFFSET) & MOD_MASK;
607  *b = bb;
608  *a = aa;
609 }
610 
611 static void wav_decode(uint16_t *in, int nx, int ox,
612  int ny, int oy, uint16_t mx)
613 {
614  int w14 = (mx < (1 << 14));
615  int n = (nx > ny) ? ny : nx;
616  int p = 1;
617  int p2;
618 
619  while (p <= n)
620  p <<= 1;
621 
622  p >>= 1;
623  p2 = p;
624  p >>= 1;
625 
626  while (p >= 1) {
627  uint16_t *py = in;
628  uint16_t *ey = in + oy * (ny - p2);
629  uint16_t i00, i01, i10, i11;
630  int oy1 = oy * p;
631  int oy2 = oy * p2;
632  int ox1 = ox * p;
633  int ox2 = ox * p2;
634 
635  for (; py <= ey; py += oy2) {
636  uint16_t *px = py;
637  uint16_t *ex = py + ox * (nx - p2);
638 
639  for (; px <= ex; px += ox2) {
640  uint16_t *p01 = px + ox1;
641  uint16_t *p10 = px + oy1;
642  uint16_t *p11 = p10 + ox1;
643 
644  if (w14) {
645  wdec14(*px, *p10, &i00, &i10);
646  wdec14(*p01, *p11, &i01, &i11);
647  wdec14(i00, i01, px, p01);
648  wdec14(i10, i11, p10, p11);
649  } else {
650  wdec16(*px, *p10, &i00, &i10);
651  wdec16(*p01, *p11, &i01, &i11);
652  wdec16(i00, i01, px, p01);
653  wdec16(i10, i11, p10, p11);
654  }
655  }
656 
657  if (nx & p) {
658  uint16_t *p10 = px + oy1;
659 
660  if (w14)
661  wdec14(*px, *p10, &i00, p10);
662  else
663  wdec16(*px, *p10, &i00, p10);
664 
665  *px = i00;
666  }
667  }
668 
669  if (ny & p) {
670  uint16_t *px = py;
671  uint16_t *ex = py + ox * (nx - p2);
672 
673  for (; px <= ex; px += ox2) {
674  uint16_t *p01 = px + ox1;
675 
676  if (w14)
677  wdec14(*px, *p01, &i00, p01);
678  else
679  wdec16(*px, *p01, &i00, p01);
680 
681  *px = i00;
682  }
683  }
684 
685  p2 = p;
686  p >>= 1;
687  }
688 }
689 
690 static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize,
691  int dsize, EXRThreadData *td)
692 {
693  GetByteContext gb;
694  uint16_t maxval, min_non_zero, max_non_zero;
695  uint16_t *ptr;
696  uint16_t *tmp = (uint16_t *)td->tmp;
697  uint16_t *out;
698  uint16_t *in;
699  int ret, i, j;
700  int pixel_half_size;/* 1 for half, 2 for float and uint32 */
701  EXRChannel *channel;
702  int tmp_offset;
703 
704  if (!td->bitmap)
705  td->bitmap = av_malloc(BITMAP_SIZE);
706  if (!td->lut)
707  td->lut = av_malloc(1 << 17);
708  if (!td->bitmap || !td->lut) {
709  av_freep(&td->bitmap);
710  av_freep(&td->lut);
711  return AVERROR(ENOMEM);
712  }
713 
714  bytestream2_init(&gb, src, ssize);
715  min_non_zero = bytestream2_get_le16(&gb);
716  max_non_zero = bytestream2_get_le16(&gb);
717 
718  if (max_non_zero >= BITMAP_SIZE)
719  return AVERROR_INVALIDDATA;
720 
721  memset(td->bitmap, 0, FFMIN(min_non_zero, BITMAP_SIZE));
722  if (min_non_zero <= max_non_zero)
723  bytestream2_get_buffer(&gb, td->bitmap + min_non_zero,
724  max_non_zero - min_non_zero + 1);
725  memset(td->bitmap + max_non_zero + 1, 0, BITMAP_SIZE - max_non_zero - 1);
726 
727  maxval = reverse_lut(td->bitmap, td->lut);
728 
729  ret = huf_uncompress(&gb, tmp, dsize / sizeof(uint16_t));
730  if (ret)
731  return ret;
732 
733  ptr = tmp;
734  for (i = 0; i < s->nb_channels; i++) {
735  channel = &s->channels[i];
736 
737  if (channel->pixel_type == EXR_HALF)
738  pixel_half_size = 1;
739  else
740  pixel_half_size = 2;
741 
742  for (j = 0; j < pixel_half_size; j++)
743  wav_decode(ptr + j, td->xsize, pixel_half_size, td->ysize,
744  td->xsize * pixel_half_size, maxval);
745  ptr += td->xsize * td->ysize * pixel_half_size;
746  }
747 
748  apply_lut(td->lut, tmp, dsize / sizeof(uint16_t));
749 
750  out = (uint16_t *)td->uncompressed_data;
751  for (i = 0; i < td->ysize; i++) {
752  tmp_offset = 0;
753  for (j = 0; j < s->nb_channels; j++) {
754  channel = &s->channels[j];
755  if (channel->pixel_type == EXR_HALF)
756  pixel_half_size = 1;
757  else
758  pixel_half_size = 2;
759 
760  in = tmp + tmp_offset * td->xsize * td->ysize + i * td->xsize * pixel_half_size;
761  tmp_offset += pixel_half_size;
762 
763 #if HAVE_BIGENDIAN
764  s->bbdsp.bswap16_buf(out, in, td->xsize * pixel_half_size);
765 #else
766  memcpy(out, in, td->xsize * 2 * pixel_half_size);
767 #endif
768  out += td->xsize * pixel_half_size;
769  }
770  }
771 
772  return 0;
773 }
774 
775 static int pxr24_uncompress(EXRContext *s, const uint8_t *src,
776  int compressed_size, int uncompressed_size,
777  EXRThreadData *td)
778 {
779  unsigned long dest_len, expected_len = 0;
780  const uint8_t *in = td->tmp;
781  uint8_t *out;
782  int c, i, j;
783 
784  for (i = 0; i < s->nb_channels; i++) {
785  if (s->channels[i].pixel_type == EXR_FLOAT) {
786  expected_len += (td->xsize * td->ysize * 3);/* PXR24 stores floats in 24 bits instead of 32 */
787  } else if (s->channels[i].pixel_type == EXR_HALF) {
788  expected_len += (td->xsize * td->ysize * 2);
789  } else {//UINT 32
790  expected_len += (td->xsize * td->ysize * 4);
791  }
792  }
793 
794  dest_len = expected_len;
795 
796  if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) {
797  return AVERROR_INVALIDDATA;
798  } else if (dest_len != expected_len) {
799  return AVERROR_INVALIDDATA;
800  }
801 
802  out = td->uncompressed_data;
803  for (i = 0; i < td->ysize; i++)
804  for (c = 0; c < s->nb_channels; c++) {
805  EXRChannel *channel = &s->channels[c];
806  const uint8_t *ptr[4];
807  uint32_t pixel = 0;
808 
809  switch (channel->pixel_type) {
810  case EXR_FLOAT:
811  ptr[0] = in;
812  ptr[1] = ptr[0] + td->xsize;
813  ptr[2] = ptr[1] + td->xsize;
814  in = ptr[2] + td->xsize;
815 
816  for (j = 0; j < td->xsize; ++j) {
817  uint32_t diff = ((unsigned)*(ptr[0]++) << 24) |
818  (*(ptr[1]++) << 16) |
819  (*(ptr[2]++) << 8);
820  pixel += diff;
821  bytestream_put_le32(&out, pixel);
822  }
823  break;
824  case EXR_HALF:
825  ptr[0] = in;
826  ptr[1] = ptr[0] + td->xsize;
827  in = ptr[1] + td->xsize;
828  for (j = 0; j < td->xsize; j++) {
829  uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++);
830 
831  pixel += diff;
832  bytestream_put_le16(&out, pixel);
833  }
834  break;
835  case EXR_UINT:
836  ptr[0] = in;
837  ptr[1] = ptr[0] + s->xdelta;
838  ptr[2] = ptr[1] + s->xdelta;
839  ptr[3] = ptr[2] + s->xdelta;
840  in = ptr[3] + s->xdelta;
841 
842  for (j = 0; j < s->xdelta; ++j) {
843  uint32_t diff = ((uint32_t)*(ptr[0]++) << 24) |
844  (*(ptr[1]++) << 16) |
845  (*(ptr[2]++) << 8 ) |
846  (*(ptr[3]++));
847  pixel += diff;
848  bytestream_put_le32(&out, pixel);
849  }
850  break;
851  default:
852  return AVERROR_INVALIDDATA;
853  }
854  }
855 
856  return 0;
857 }
858 
859 static void unpack_14(const uint8_t b[14], uint16_t s[16])
860 {
861  unsigned short shift = (b[ 2] >> 2) & 15;
862  unsigned short bias = (0x20 << shift);
863  int i;
864 
865  s[ 0] = (b[0] << 8) | b[1];
866 
867  s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3f) << shift) - bias;
868  s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3f) << shift) - bias;
869  s[12] = s[ 8] + ((b[ 4] & 0x3f) << shift) - bias;
870 
871  s[ 1] = s[ 0] + ((b[ 5] >> 2) << shift) - bias;
872  s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3f) << shift) - bias;
873  s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3f) << shift) - bias;
874  s[13] = s[12] + ((b[ 7] & 0x3f) << shift) - bias;
875 
876  s[ 2] = s[ 1] + ((b[ 8] >> 2) << shift) - bias;
877  s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3f) << shift) - bias;
878  s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3f) << shift) - bias;
879  s[14] = s[13] + ((b[10] & 0x3f) << shift) - bias;
880 
881  s[ 3] = s[ 2] + ((b[11] >> 2) << shift) - bias;
882  s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3f) << shift) - bias;
883  s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3f) << shift) - bias;
884  s[15] = s[14] + ((b[13] & 0x3f) << shift) - bias;
885 
886  for (i = 0; i < 16; ++i) {
887  if (s[i] & 0x8000)
888  s[i] &= 0x7fff;
889  else
890  s[i] = ~s[i];
891  }
892 }
893 
894 static void unpack_3(const uint8_t b[3], uint16_t s[16])
895 {
896  int i;
897 
898  s[0] = (b[0] << 8) | b[1];
899 
900  if (s[0] & 0x8000)
901  s[0] &= 0x7fff;
902  else
903  s[0] = ~s[0];
904 
905  for (i = 1; i < 16; i++)
906  s[i] = s[0];
907 }
908 
909 
910 static int b44_uncompress(EXRContext *s, const uint8_t *src, int compressed_size,
911  int uncompressed_size, EXRThreadData *td) {
912  const int8_t *sr = src;
913  int stay_to_uncompress = compressed_size;
914  int nb_b44_block_w, nb_b44_block_h;
915  int index_tl_x, index_tl_y, index_out, index_tmp;
916  uint16_t tmp_buffer[16]; /* B44 use 4x4 half float pixel */
917  int c, iY, iX, y, x;
918  int target_channel_offset = 0;
919 
920  /* calc B44 block count */
921  nb_b44_block_w = td->xsize / 4;
922  if ((td->xsize % 4) != 0)
923  nb_b44_block_w++;
924 
925  nb_b44_block_h = td->ysize / 4;
926  if ((td->ysize % 4) != 0)
927  nb_b44_block_h++;
928 
929  for (c = 0; c < s->nb_channels; c++) {
930  if (s->channels[c].pixel_type == EXR_HALF) {/* B44 only compress half float data */
931  for (iY = 0; iY < nb_b44_block_h; iY++) {
932  for (iX = 0; iX < nb_b44_block_w; iX++) {/* For each B44 block */
933  if (stay_to_uncompress < 3) {
934  av_log(s, AV_LOG_ERROR, "Not enough data for B44A block: %d", stay_to_uncompress);
935  return AVERROR_INVALIDDATA;
936  }
937 
938  if (src[compressed_size - stay_to_uncompress + 2] == 0xfc) { /* B44A block */
939  unpack_3(sr, tmp_buffer);
940  sr += 3;
941  stay_to_uncompress -= 3;
942  } else {/* B44 Block */
943  if (stay_to_uncompress < 14) {
944  av_log(s, AV_LOG_ERROR, "Not enough data for B44 block: %d", stay_to_uncompress);
945  return AVERROR_INVALIDDATA;
946  }
947  unpack_14(sr, tmp_buffer);
948  sr += 14;
949  stay_to_uncompress -= 14;
950  }
951 
952  /* copy data to uncompress buffer (B44 block can exceed target resolution)*/
953  index_tl_x = iX * 4;
954  index_tl_y = iY * 4;
955 
956  for (y = index_tl_y; y < FFMIN(index_tl_y + 4, td->ysize); y++) {
957  for (x = index_tl_x; x < FFMIN(index_tl_x + 4, td->xsize); x++) {
958  index_out = target_channel_offset * td->xsize + y * td->channel_line_size + 2 * x;
959  index_tmp = (y-index_tl_y) * 4 + (x-index_tl_x);
960  td->uncompressed_data[index_out] = tmp_buffer[index_tmp] & 0xff;
961  td->uncompressed_data[index_out + 1] = tmp_buffer[index_tmp] >> 8;
962  }
963  }
964  }
965  }
966  target_channel_offset += 2;
967  } else {/* Float or UINT 32 channel */
968  if (stay_to_uncompress < td->ysize * td->xsize * 4) {
969  av_log(s, AV_LOG_ERROR, "Not enough data to uncompress channel: %d", stay_to_uncompress);
970  return AVERROR_INVALIDDATA;
971  }
972 
973  for (y = 0; y < td->ysize; y++) {
974  index_out = target_channel_offset * td->xsize + y * td->channel_line_size;
975  memcpy(&td->uncompressed_data[index_out], sr, td->xsize * 4);
976  sr += td->xsize * 4;
977  }
978  target_channel_offset += 4;
979 
980  stay_to_uncompress -= td->ysize * td->xsize * 4;
981  }
982  }
983 
984  return 0;
985 }
986 
987 static int decode_block(AVCodecContext *avctx, void *tdata,
988  int jobnr, int threadnr)
989 {
990  EXRContext *s = avctx->priv_data;
991  AVFrame *const p = s->picture;
992  EXRThreadData *td = &s->thread_data[threadnr];
993  const uint8_t *channel_buffer[4] = { 0 };
994  const uint8_t *buf = s->buf;
995  uint64_t line_offset, uncompressed_size;
996  uint8_t *ptr;
997  uint32_t data_size;
998  int line, col = 0;
999  uint64_t tile_x, tile_y, tile_level_x, tile_level_y;
1000  const uint8_t *src;
1001  int step = s->desc->flags & AV_PIX_FMT_FLAG_FLOAT ? 4 : 2 * s->desc->nb_components;
1002  int bxmin = 0, axmax = 0, window_xoffset = 0;
1003  int window_xmin, window_xmax, window_ymin, window_ymax;
1004  int data_xoffset, data_yoffset, data_window_offset, xsize, ysize;
1005  int i, x, buf_size = s->buf_size;
1006  int c, rgb_channel_count;
1007  float one_gamma = 1.0f / s->gamma;
1008  avpriv_trc_function trc_func = avpriv_get_trc_function(s->apply_trc_type);
1009  int ret;
1010 
1011  line_offset = AV_RL64(s->gb.buffer + jobnr * 8);
1012 
1013  if (s->is_tile) {
1014  if (buf_size < 20 || line_offset > buf_size - 20)
1015  return AVERROR_INVALIDDATA;
1016 
1017  src = buf + line_offset + 20;
1018 
1019  tile_x = AV_RL32(src - 20);
1020  tile_y = AV_RL32(src - 16);
1021  tile_level_x = AV_RL32(src - 12);
1022  tile_level_y = AV_RL32(src - 8);
1023 
1024  data_size = AV_RL32(src - 4);
1025  if (data_size <= 0 || data_size > buf_size - line_offset - 20)
1026  return AVERROR_INVALIDDATA;
1027 
1028  if (tile_level_x || tile_level_y) { /* tile level, is not the full res level */
1029  avpriv_report_missing_feature(s->avctx, "Subres tile before full res tile");
1030  return AVERROR_PATCHWELCOME;
1031  }
1032 
1033  line = s->ymin + s->tile_attr.ySize * tile_y;
1034  col = s->tile_attr.xSize * tile_x;
1035 
1036  if (line < s->ymin || line > s->ymax ||
1037  s->xmin + col < s->xmin || s->xmin + col > s->xmax)
1038  return AVERROR_INVALIDDATA;
1039 
1040  td->ysize = FFMIN(s->tile_attr.ySize, s->ydelta - tile_y * s->tile_attr.ySize);
1041  td->xsize = FFMIN(s->tile_attr.xSize, s->xdelta - tile_x * s->tile_attr.xSize);
1042 
1043  if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX)
1044  return AVERROR_INVALIDDATA;
1045 
1046  td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompressed size of one line */
1047  uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompressed size of the block */
1048  } else {
1049  if (buf_size < 8 || line_offset > buf_size - 8)
1050  return AVERROR_INVALIDDATA;
1051 
1052  src = buf + line_offset + 8;
1053  line = AV_RL32(src - 8);
1054 
1055  if (line < s->ymin || line > s->ymax)
1056  return AVERROR_INVALIDDATA;
1057 
1058  data_size = AV_RL32(src - 4);
1059  if (data_size <= 0 || data_size > buf_size - line_offset - 8)
1060  return AVERROR_INVALIDDATA;
1061 
1062  td->ysize = FFMIN(s->scan_lines_per_block, s->ymax - line + 1); /* s->ydelta - line ?? */
1063  td->xsize = s->xdelta;
1064 
1065  if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX)
1066  return AVERROR_INVALIDDATA;
1067 
1068  td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompressed size of one line */
1069  uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompressed size of the block */
1070 
1071  if ((s->compression == EXR_RAW && (data_size != uncompressed_size ||
1072  line_offset > buf_size - uncompressed_size)) ||
1073  (s->compression != EXR_RAW && (data_size > uncompressed_size ||
1074  line_offset > buf_size - data_size))) {
1075  return AVERROR_INVALIDDATA;
1076  }
1077  }
1078 
1079  window_xmin = FFMIN(avctx->width, FFMAX(0, s->xmin + col));
1080  window_xmax = FFMIN(avctx->width, FFMAX(0, s->xmin + col + td->xsize));
1081  window_ymin = FFMIN(avctx->height, FFMAX(0, line ));
1082  window_ymax = FFMIN(avctx->height, FFMAX(0, line + td->ysize));
1083  xsize = window_xmax - window_xmin;
1084  ysize = window_ymax - window_ymin;
1085 
1086  /* tile or scanline not visible, skip decoding */
1087  if (xsize <= 0 || ysize <= 0)
1088  return 0;
1089 
1090  /* is the first tile or is a scanline */
1091  if(col == 0) {
1092  window_xmin = 0;
1093  /* pixels to add at the left of the display window */
1094  window_xoffset = FFMAX(0, s->xmin);
1095  /* bytes to add at the left of the display window */
1096  bxmin = window_xoffset * step;
1097  }
1098 
1099  /* is the last tile or is a scanline */
1100  if(col + td->xsize == s->xdelta) {
1101  window_xmax = avctx->width;
1102  /* bytes to add at the right of the display window */
1103  axmax = FFMAX(0, (avctx->width - (s->xmax + 1))) * step;
1104  }
1105 
1106  if (data_size < uncompressed_size || s->is_tile) { /* td->tmp is used for tile reorganization */
1107  av_fast_padded_malloc(&td->tmp, &td->tmp_size, uncompressed_size);
1108  if (!td->tmp)
1109  return AVERROR(ENOMEM);
1110  }
1111 
1112  if (data_size < uncompressed_size) {
1113  av_fast_padded_malloc(&td->uncompressed_data,
1114  &td->uncompressed_size, uncompressed_size + 64);/* Force 64 byte padding for the AVX2 reorder_pixels dst */
1115 
1116  if (!td->uncompressed_data)
1117  return AVERROR(ENOMEM);
1118 
1119  ret = AVERROR_INVALIDDATA;
1120  switch (s->compression) {
1121  case EXR_ZIP1:
1122  case EXR_ZIP16:
1123  ret = zip_uncompress(s, src, data_size, uncompressed_size, td);
1124  break;
1125  case EXR_PIZ:
1126  ret = piz_uncompress(s, src, data_size, uncompressed_size, td);
1127  break;
1128  case EXR_PXR24:
1129  ret = pxr24_uncompress(s, src, data_size, uncompressed_size, td);
1130  break;
1131  case EXR_RLE:
1132  ret = rle_uncompress(s, src, data_size, uncompressed_size, td);
1133  break;
1134  case EXR_B44:
1135  case EXR_B44A:
1136  ret = b44_uncompress(s, src, data_size, uncompressed_size, td);
1137  break;
1138  }
1139  if (ret < 0) {
1140  av_log(avctx, AV_LOG_ERROR, "decode_block() failed.\n");
1141  return ret;
1142  }
1143  src = td->uncompressed_data;
1144  }
1145 
1146  /* offsets to crop data outside display window */
1147  data_xoffset = FFABS(FFMIN(0, s->xmin + col)) * (s->pixel_type == EXR_HALF ? 2 : 4);
1148  data_yoffset = FFABS(FFMIN(0, line));
1149  data_window_offset = (data_yoffset * td->channel_line_size) + data_xoffset;
1150 
1151  if (!s->is_luma) {
1152  channel_buffer[0] = src + (td->xsize * s->channel_offsets[0]) + data_window_offset;
1153  channel_buffer[1] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
1154  channel_buffer[2] = src + (td->xsize * s->channel_offsets[2]) + data_window_offset;
1155  rgb_channel_count = 3;
1156  } else { /* put y data in the first channel_buffer */
1157  channel_buffer[0] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
1158  rgb_channel_count = 1;
1159  }
1160  if (s->channel_offsets[3] >= 0)
1161  channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset;
1162 
1163  if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) {
1164 
1165  /* todo: change this when a floating point pixel format with luma with alpha is implemented */
1166  int channel_count = s->channel_offsets[3] >= 0 ? 4 : rgb_channel_count;
1167  if (s->is_luma) {
1168  channel_buffer[1] = channel_buffer[0];
1169  channel_buffer[2] = channel_buffer[0];
1170  }
1171 
1172  for (c = 0; c < channel_count; c++) {
1173  int plane = s->desc->comp[c].plane;
1174  ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * 4);
1175 
1176  for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) {
1177  const uint8_t *src;
1178  union av_intfloat32 *ptr_x;
1179 
1180  src = channel_buffer[c];
1181  ptr_x = (union av_intfloat32 *)ptr;
1182 
1183  // Zero out the start if xmin is not 0
1184  memset(ptr_x, 0, bxmin);
1185  ptr_x += window_xoffset;
1186 
1187  if (s->pixel_type == EXR_FLOAT) {
1188  // 32-bit
1189  union av_intfloat32 t;
1190  if (trc_func && c < 3) {
1191  for (x = 0; x < xsize; x++) {
1192  t.i = bytestream_get_le32(&src);
1193  t.f = trc_func(t.f);
1194  *ptr_x++ = t;
1195  }
1196  } else {
1197  for (x = 0; x < xsize; x++) {
1198  t.i = bytestream_get_le32(&src);
1199  if (t.f > 0.0f && c < 3) /* avoid negative values */
1200  t.f = powf(t.f, one_gamma);
1201  *ptr_x++ = t;
1202  }
1203  }
1204  } else if (s->pixel_type == EXR_HALF) {
1205  // 16-bit
1206  if (c < 3) {
1207  for (x = 0; x < xsize; x++) {
1208  *ptr_x++ = s->gamma_table[bytestream_get_le16(&src)];
1209  }
1210  } else {
1211  for (x = 0; x < xsize; x++) {
1212  *ptr_x++ = exr_half2float(bytestream_get_le16(&src));
1213  }
1214  }
1215  }
1216 
1217  // Zero out the end if xmax+1 is not w
1218  memset(ptr_x, 0, axmax);
1219  channel_buffer[c] += td->channel_line_size;
1220  }
1221  }
1222  } else {
1223 
1225  ptr = p->data[0] + window_ymin * p->linesize[0] + (window_xmin * s->desc->nb_components * 2);
1226 
1227  for (i = 0; i < ysize; i++, ptr += p->linesize[0]) {
1228 
1229  const uint8_t * a;
1230  const uint8_t *rgb[3];
1231  uint16_t *ptr_x;
1232 
1233  for (c = 0; c < rgb_channel_count; c++) {
1234  rgb[c] = channel_buffer[c];
1235  }
1236 
1237  if (channel_buffer[3])
1238  a = channel_buffer[3];
1239 
1240  ptr_x = (uint16_t *) ptr;
1241 
1242  // Zero out the start if xmin is not 0
1243  memset(ptr_x, 0, bxmin);
1244  ptr_x += window_xoffset * s->desc->nb_components;
1245 
1246  for (x = 0; x < xsize; x++) {
1247  for (c = 0; c < rgb_channel_count; c++) {
1248  *ptr_x++ = bytestream_get_le32(&rgb[c]) >> 16;
1249  }
1250 
1251  if (channel_buffer[3])
1252  *ptr_x++ = bytestream_get_le32(&a) >> 16;
1253  }
1254 
1255  // Zero out the end if xmax+1 is not w
1256  memset(ptr_x, 0, axmax);
1257 
1258  channel_buffer[0] += td->channel_line_size;
1259  channel_buffer[1] += td->channel_line_size;
1260  channel_buffer[2] += td->channel_line_size;
1261  if (channel_buffer[3])
1262  channel_buffer[3] += td->channel_line_size;
1263  }
1264  }
1265 
1266  return 0;
1267 }
1268 
1269 /**
1270  * Check if the variable name corresponds to its data type.
1271  *
1272  * @param s the EXRContext
1273  * @param value_name name of the variable to check
1274  * @param value_type type of the variable to check
1275  * @param minimum_length minimum length of the variable data
1276  *
1277  * @return bytes to read containing variable data
1278  * -1 if variable is not found
1279  * 0 if buffer ended prematurely
1280  */
1281 static int check_header_variable(EXRContext *s,
1282  const char *value_name,
1283  const char *value_type,
1284  unsigned int minimum_length)
1285 {
1286  int var_size = -1;
1287 
1288  if (bytestream2_get_bytes_left(&s->gb) >= minimum_length &&
1289  !strcmp(s->gb.buffer, value_name)) {
1290  // found value_name, jump to value_type (null terminated strings)
1291  s->gb.buffer += strlen(value_name) + 1;
1292  if (!strcmp(s->gb.buffer, value_type)) {
1293  s->gb.buffer += strlen(value_type) + 1;
1294  var_size = bytestream2_get_le32(&s->gb);
1295  // don't go read past boundaries
1296  if (var_size > bytestream2_get_bytes_left(&s->gb))
1297  var_size = 0;
1298  } else {
1299  // value_type not found, reset the buffer
1300  s->gb.buffer -= strlen(value_name) + 1;
1301  av_log(s->avctx, AV_LOG_WARNING,
1302  "Unknown data type %s for header variable %s.\n",
1303  value_type, value_name);
1304  }
1305  }
1306 
1307  return var_size;
1308 }
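For context, an illustrative note on the byte layout the function above matches against (the pixelAspectRatio example is made up, but the layout follows the OpenEXR header format):

/* Each header attribute is stored in the bytestream as
 *
 *   name '\0' type '\0' <int32 LE size> <size bytes of data>
 *
 * e.g. a pixel aspect ratio attribute looks like
 *
 *   "pixelAspectRatio\0" "float\0" 04 00 00 00 <4-byte little-endian float>
 *
 * which is why check_header_variable() skips two null-terminated strings and
 * then reads a 32-bit length before handing the payload back to the caller. */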
1309 
1310 static int decode_header(EXRContext *s, AVFrame *frame)
1311 {
1312  AVDictionary *metadata = NULL;
1313  int magic_number, version, i, flags, sar = 0;
1314  int layer_match = 0;
1315  int ret;
1316  int dup_channels = 0;
1317 
1318  s->current_channel_offset = 0;
1319  s->xmin = ~0;
1320  s->xmax = ~0;
1321  s->ymin = ~0;
1322  s->ymax = ~0;
1323  s->xdelta = ~0;
1324  s->ydelta = ~0;
1325  s->channel_offsets[0] = -1;
1326  s->channel_offsets[1] = -1;
1327  s->channel_offsets[2] = -1;
1328  s->channel_offsets[3] = -1;
1329  s->pixel_type = EXR_UNKNOWN;
1330  s->compression = EXR_UNKN;
1331  s->nb_channels = 0;
1332  s->w = 0;
1333  s->h = 0;
1334  s->tile_attr.xSize = -1;
1335  s->tile_attr.ySize = -1;
1336  s->is_tile = 0;
1337  s->is_luma = 0;
1338 
1339  if (bytestream2_get_bytes_left(&s->gb) < 10) {
1340  av_log(s->avctx, AV_LOG_ERROR, "Header too short to parse.\n");
1341  return AVERROR_INVALIDDATA;
1342  }
1343 
1344  magic_number = bytestream2_get_le32(&s->gb);
1345  if (magic_number != 20000630) {
1346  /* As per documentation of OpenEXR, it is supposed to be
1347  * int 20000630 little-endian */
1348  av_log(s->avctx, AV_LOG_ERROR, "Wrong magic number %d.\n", magic_number);
1349  return AVERROR_INVALIDDATA;
1350  }
1351 
1352  version = bytestream2_get_byte(&s->gb);
1353  if (version != 2) {
1354  avpriv_report_missing_feature(s->avctx, "Version %d", version);
1355  return AVERROR_PATCHWELCOME;
1356  }
1357 
1358  flags = bytestream2_get_le24(&s->gb);
1359 
1360  if (flags & 0x02)
1361  s->is_tile = 1;
1362  if (flags & 0x08) {
1363  avpriv_report_missing_feature(s->avctx, "deep data");
1364  return AVERROR_PATCHWELCOME;
1365  }
1366  if (flags & 0x10) {
1367  avpriv_report_missing_feature(s->avctx, "multipart");
1368  return AVERROR_PATCHWELCOME;
1369  }
1370 
1371  // Parse the header
1372  while (bytestream2_get_bytes_left(&s->gb) > 0 && *s->gb.buffer) {
1373  int var_size;
1374  if ((var_size = check_header_variable(s, "channels",
1375  "chlist", 38)) >= 0) {
1376  GetByteContext ch_gb;
1377  if (!var_size) {
1378  ret = AVERROR_INVALIDDATA;
1379  goto fail;
1380  }
1381 
1382  bytestream2_init(&ch_gb, s->gb.buffer, var_size);
1383 
1384  while (bytestream2_get_bytes_left(&ch_gb) >= 19) {
1385  EXRChannel *channel;
1386  enum ExrPixelType current_pixel_type;
1387  int channel_index = -1;
1388  int xsub, ysub;
1389 
1390  if (strcmp(s->layer, "") != 0) {
1391  if (strncmp(ch_gb.buffer, s->layer, strlen(s->layer)) == 0) {
1392  layer_match = 1;
1393  av_log(s->avctx, AV_LOG_INFO,
1394  "Channel match layer : %s.\n", ch_gb.buffer);
1395  ch_gb.buffer += strlen(s->layer);
1396  if (*ch_gb.buffer == '.')
1397  ch_gb.buffer++; /* skip dot if not given */
1398  } else {
1399  layer_match = 0;
1400  av_log(s->avctx, AV_LOG_INFO,
1401  "Channel doesn't match layer : %s.\n", ch_gb.buffer);
1402  }
1403  } else {
1404  layer_match = 1;
1405  }
1406 
1407  if (layer_match) { /* only search channel if the layer match is valid */
1408  if (!av_strcasecmp(ch_gb.buffer, "R") ||
1409  !av_strcasecmp(ch_gb.buffer, "X") ||
1410  !av_strcasecmp(ch_gb.buffer, "U")) {
1411  channel_index = 0;
1412  s->is_luma = 0;
1413  } else if (!av_strcasecmp(ch_gb.buffer, "G") ||
1414  !av_strcasecmp(ch_gb.buffer, "V")) {
1415  channel_index = 1;
1416  s->is_luma = 0;
1417  } else if (!av_strcasecmp(ch_gb.buffer, "Y")) {
1418  channel_index = 1;
1419  s->is_luma = 1;
1420  } else if (!av_strcasecmp(ch_gb.buffer, "B") ||
1421  !av_strcasecmp(ch_gb.buffer, "Z") ||
1422  !av_strcasecmp(ch_gb.buffer, "W")) {
1423  channel_index = 2;
1424  s->is_luma = 0;
1425  } else if (!av_strcasecmp(ch_gb.buffer, "A")) {
1426  channel_index = 3;
1427  } else {
1428  av_log(s->avctx, AV_LOG_WARNING,
1429  "Unsupported channel %.256s.\n", ch_gb.buffer);
1430  }
1431  }
1432 
1433  /* skip until you get a 0 */
1434  while (bytestream2_get_bytes_left(&ch_gb) > 0 &&
1435  bytestream2_get_byte(&ch_gb))
1436  continue;
1437 
1438  if (bytestream2_get_bytes_left(&ch_gb) < 4) {
1439  av_log(s->avctx, AV_LOG_ERROR, "Incomplete header.\n");
1440  ret = AVERROR_INVALIDDATA;
1441  goto fail;
1442  }
1443 
1444  current_pixel_type = bytestream2_get_le32(&ch_gb);
1445  if (current_pixel_type >= EXR_UNKNOWN) {
1446  avpriv_report_missing_feature(s->avctx, "Pixel type %d",
1447  current_pixel_type);
1448  ret = AVERROR_PATCHWELCOME;
1449  goto fail;
1450  }
1451 
1452  bytestream2_skip(&ch_gb, 4);
1453  xsub = bytestream2_get_le32(&ch_gb);
1454  ysub = bytestream2_get_le32(&ch_gb);
1455 
1456  if (xsub != 1 || ysub != 1) {
1457  avpriv_report_missing_feature(s->avctx,
1458  "Subsampling %dx%d",
1459  xsub, ysub);
1460  ret = AVERROR_PATCHWELCOME;
1461  goto fail;
1462  }
1463 
1464  if (channel_index >= 0 && s->channel_offsets[channel_index] == -1) { /* channel has not been previously assigned */
1465  if (s->pixel_type != EXR_UNKNOWN &&
1466  s->pixel_type != current_pixel_type) {
1467  av_log(s->avctx, AV_LOG_ERROR,
1468  "RGB channels not of the same depth.\n");
1469  ret = AVERROR_INVALIDDATA;
1470  goto fail;
1471  }
1472  s->pixel_type = current_pixel_type;
1473  s->channel_offsets[channel_index] = s->current_channel_offset;
1474  } else if (channel_index >= 0) {
1475  av_log(s->avctx, AV_LOG_WARNING,
1476  "Multiple channels with index %d.\n", channel_index);
1477  if (++dup_channels > 10) {
1478  ret = AVERROR_INVALIDDATA;
1479  goto fail;
1480  }
1481  }
1482 
1483  s->channels = av_realloc(s->channels,
1484  ++s->nb_channels * sizeof(EXRChannel));
1485  if (!s->channels) {
1486  ret = AVERROR(ENOMEM);
1487  goto fail;
1488  }
1489  channel = &s->channels[s->nb_channels - 1];
1490  channel->pixel_type = current_pixel_type;
1491  channel->xsub = xsub;
1492  channel->ysub = ysub;
1493 
1494  if (current_pixel_type == EXR_HALF) {
1495  s->current_channel_offset += 2;
1496  } else {/* Float or UINT32 */
1497  s->current_channel_offset += 4;
1498  }
1499  }
1500 
1501  /* Check if all channels are set with an offset or if the channels
1502  * are causing an overflow */
1503  if (!s->is_luma) {/* if we expected to have at least 3 channels */
1504  if (FFMIN3(s->channel_offsets[0],
1505  s->channel_offsets[1],
1506  s->channel_offsets[2]) < 0) {
1507  if (s->channel_offsets[0] < 0)
1508  av_log(s->avctx, AV_LOG_ERROR, "Missing red channel.\n");
1509  if (s->channel_offsets[1] < 0)
1510  av_log(s->avctx, AV_LOG_ERROR, "Missing green channel.\n");
1511  if (s->channel_offsets[2] < 0)
1512  av_log(s->avctx, AV_LOG_ERROR, "Missing blue channel.\n");
1513  ret = AVERROR_INVALIDDATA;
1514  goto fail;
1515  }
1516  }
1517 
1518  // skip one last byte and update main gb
1519  s->gb.buffer = ch_gb.buffer + 1;
1520  continue;
1521  } else if ((var_size = check_header_variable(s, "dataWindow", "box2i",
1522  31)) >= 0) {
1523  int xmin, ymin, xmax, ymax;
1524  if (!var_size) {
1525  ret = AVERROR_INVALIDDATA;
1526  goto fail;
1527  }
1528 
1529  xmin = bytestream2_get_le32(&s->gb);
1530  ymin = bytestream2_get_le32(&s->gb);
1531  xmax = bytestream2_get_le32(&s->gb);
1532  ymax = bytestream2_get_le32(&s->gb);
1533 
1534  if (xmin > xmax || ymin > ymax ||
1535  (unsigned)xmax - xmin >= INT_MAX ||
1536  (unsigned)ymax - ymin >= INT_MAX) {
1537  ret = AVERROR_INVALIDDATA;
1538  goto fail;
1539  }
1540  s->xmin = xmin;
1541  s->xmax = xmax;
1542  s->ymin = ymin;
1543  s->ymax = ymax;
1544  s->xdelta = (s->xmax - s->xmin) + 1;
1545  s->ydelta = (s->ymax - s->ymin) + 1;
1546 
1547  continue;
1548  } else if ((var_size = check_header_variable(s, "displayWindow",
1549  "box2i", 34)) >= 0) {
1550  if (!var_size) {
1551  ret = AVERROR_INVALIDDATA;
1552  goto fail;
1553  }
1554 
1555  bytestream2_skip(&s->gb, 8);
1556  s->w = bytestream2_get_le32(&s->gb) + 1;
1557  s->h = bytestream2_get_le32(&s->gb) + 1;
1558 
1559  continue;
1560  } else if ((var_size = check_header_variable(s, "lineOrder",
1561  "lineOrder", 25)) >= 0) {
1562  int line_order;
1563  if (!var_size) {
1564  ret = AVERROR_INVALIDDATA;
1565  goto fail;
1566  }
1567 
1568  line_order = bytestream2_get_byte(&s->gb);
1569  av_log(s->avctx, AV_LOG_DEBUG, "line order: %d.\n", line_order);
1570  if (line_order > 2) {
1571  av_log(s->avctx, AV_LOG_ERROR, "Unknown line order.\n");
1572  ret = AVERROR_INVALIDDATA;
1573  goto fail;
1574  }
1575 
1576  continue;
1577  } else if ((var_size = check_header_variable(s, "pixelAspectRatio",
1578  "float", 31)) >= 0) {
1579  if (!var_size) {
1580  ret = AVERROR_INVALIDDATA;
1581  goto fail;
1582  }
1583 
1584  sar = bytestream2_get_le32(&s->gb);
1585 
1586  continue;
1587  } else if ((var_size = check_header_variable(s, "compression",
1588  "compression", 29)) >= 0) {
1589  if (!var_size) {
1590  ret = AVERROR_INVALIDDATA;
1591  goto fail;
1592  }
1593 
1594  if (s->compression == EXR_UNKN)
1595  s->compression = bytestream2_get_byte(&s->gb);
1596  else
1597  av_log(s->avctx, AV_LOG_WARNING,
1598  "Found more than one compression attribute.\n");
1599 
1600  continue;
1601  } else if ((var_size = check_header_variable(s, "tiles",
1602  "tiledesc", 22)) >= 0) {
1603  char tileLevel;
1604 
1605  if (!s->is_tile)
1606  av_log(s->avctx, AV_LOG_WARNING,
1607  "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n");
1608 
1609  s->tile_attr.xSize = bytestream2_get_le32(&s->gb);
1610  s->tile_attr.ySize = bytestream2_get_le32(&s->gb);
1611 
1612  tileLevel = bytestream2_get_byte(&s->gb);
1613  s->tile_attr.level_mode = tileLevel & 0x0f;
1614  s->tile_attr.level_round = (tileLevel >> 4) & 0x0f;
1615 
1616  if (s->tile_attr.level_mode >= EXR_TILE_LEVEL_UNKNOWN) {
1617  avpriv_report_missing_feature(s->avctx, "Tile level mode %d",
1618  s->tile_attr.level_mode);
1619  ret = AVERROR_PATCHWELCOME;
1620  goto fail;
1621  }
1622 
1623  if (s->tile_attr.level_round >= EXR_TILE_ROUND_UNKNOWN) {
1624  avpriv_report_missing_feature(s->avctx, "Tile level round %d",
1625  s->tile_attr.level_round);
1626  ret = AVERROR_PATCHWELCOME;
1627  goto fail;
1628  }
1629 
1630  continue;
1631  } else if ((var_size = check_header_variable(s, "writer",
1632  "string", 1)) >= 0) {
1633  uint8_t key[256] = { 0 };
1634 
1635  bytestream2_get_buffer(&s->gb, key, FFMIN(sizeof(key) - 1, var_size));
1636  av_dict_set(&metadata, "writer", key, 0);
1637 
1638  continue;
1639  }
1640 
1641  // Check if there are enough bytes for a header
1642  if (bytestream2_get_bytes_left(&s->gb) <= 9) {
1643  av_log(s->avctx, AV_LOG_ERROR, "Incomplete header\n");
1644  ret = AVERROR_INVALIDDATA;
1645  goto fail;
1646  }
1647 
1648  // Process unknown variables
1649  for (i = 0; i < 2; i++) // value_name and value_type
1650  while (bytestream2_get_byte(&s->gb) != 0);
1651 
1652  // Skip variable length
1653  bytestream2_skip(&s->gb, bytestream2_get_le32(&s->gb));
1654  }
1655 
1656  ff_set_sar(s->avctx, av_d2q(av_int2float(sar), 255));
1657 
1658  if (s->compression == EXR_UNKN) {
1659  av_log(s->avctx, AV_LOG_ERROR, "Missing compression attribute.\n");
1660  ret = AVERROR_INVALIDDATA;
1661  goto fail;
1662  }
1663 
1664  if (s->is_tile) {
1665  if (s->tile_attr.xSize < 1 || s->tile_attr.ySize < 1) {
1666  av_log(s->avctx, AV_LOG_ERROR, "Invalid tile attribute.\n");
1667  ret = AVERROR_INVALIDDATA;
1668  goto fail;
1669  }
1670  }
1671 
1672  if (bytestream2_get_bytes_left(&s->gb) <= 0) {
1673  av_log(s->avctx, AV_LOG_ERROR, "Incomplete frame.\n");
1674  ret = AVERROR_INVALIDDATA;
1675  goto fail;
1676  }
1677 
1678  frame->metadata = metadata;
1679 
1680  // aaand we are done
1681  bytestream2_skip(&s->gb, 1);
1682  return 0;
1683 fail:
1684  av_dict_free(&metadata);
1685  return ret;
1686 }
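For orientation, an illustrative note on the file layout that decode_header() (and the start of decode_frame()) walks through; this summarizes the OpenEXR container rather than quoting this file:

/* The start of every OpenEXR file handled here looks like (little-endian):
 *
 *   76 2f 31 01        magic number, the int32 20000630
 *   02                 version byte
 *   xx xx xx           flag bytes; the decoder checks 0x02 (tiled),
 *                      0x08 (deep data) and 0x10 (multipart)
 *   <attribute list>   name\0 type\0 <int32 size> <data>, repeated
 *   00                 empty attribute name terminating the header
 *   <offset table>     one uint64 per scanline block or tile, consumed later
 *                      by decode_frame()/decode_block()
 */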
1687 
1688 static int decode_frame(AVCodecContext *avctx, void *data,
1689  int *got_frame, AVPacket *avpkt)
1690 {
1691  EXRContext *s = avctx->priv_data;
1692  ThreadFrame frame = { .f = data };
1693  AVFrame *picture = data;
1694  uint8_t *ptr;
1695 
1696  int i, y, ret, ymax;
1697  int planes;
1698  int out_line_size;
1699  int nb_blocks; /* nb scanline or nb tile */
1700  uint64_t start_offset_table;
1701  uint64_t start_next_scanline;
1702  PutByteContext offset_table_writer;
1703 
1704  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1705 
1706  if ((ret = decode_header(s, picture)) < 0)
1707  return ret;
1708 
1709  switch (s->pixel_type) {
1710  case EXR_FLOAT:
1711  case EXR_HALF:
1712  if (s->channel_offsets[3] >= 0) {
1713  if (!s->is_luma) {
1714  avctx->pix_fmt = AV_PIX_FMT_GBRAPF32;
1715  } else {
1716  /* todo: change this when a floating point pixel format with luma with alpha is implemented */
1717  avctx->pix_fmt = AV_PIX_FMT_GBRAPF32;
1718  }
1719  } else {
1720  if (!s->is_luma) {
1721  avctx->pix_fmt = AV_PIX_FMT_GBRPF32;
1722  } else {
1723  avctx->pix_fmt = AV_PIX_FMT_GRAYF32;
1724  }
1725  }
1726  break;
1727  case EXR_UINT:
1728  if (s->channel_offsets[3] >= 0) {
1729  if (!s->is_luma) {
1730  avctx->pix_fmt = AV_PIX_FMT_RGBA64;
1731  } else {
1732  avctx->pix_fmt = AV_PIX_FMT_YA16;
1733  }
1734  } else {
1735  if (!s->is_luma) {
1736  avctx->pix_fmt = AV_PIX_FMT_RGB48;
1737  } else {
1738  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
1739  }
1740  }
1741  break;
1742  default:
1743  av_log(avctx, AV_LOG_ERROR, "Missing channel list.\n");
1744  return AVERROR_INVALIDDATA;
1745  }
1746 
1747  if (s->apply_trc_type != AVCOL_TRC_UNSPECIFIED)
1748  avctx->color_trc = s->apply_trc_type;
1749 
1750  switch (s->compression) {
1751  case EXR_RAW:
1752  case EXR_RLE:
1753  case EXR_ZIP1:
1754  s->scan_lines_per_block = 1;
1755  break;
1756  case EXR_PXR24:
1757  case EXR_ZIP16:
1758  s->scan_lines_per_block = 16;
1759  break;
1760  case EXR_PIZ:
1761  case EXR_B44:
1762  case EXR_B44A:
1763  s->scan_lines_per_block = 32;
1764  break;
1765  default:
1766  avpriv_report_missing_feature(avctx, "Compression %d", s->compression);
1767  return AVERROR_PATCHWELCOME;
1768  }
1769 
1770  /* Verify the xmin, xmax, ymin and ymax before setting the actual image size.
1771  * It is possible for the data window to be larger than or outside the display window. */
1772  if (s->xmin > s->xmax || s->ymin > s->ymax ||
1773  s->ydelta == 0xFFFFFFFF || s->xdelta == 0xFFFFFFFF) {
1774  av_log(avctx, AV_LOG_ERROR, "Wrong or missing size information.\n");
1775  return AVERROR_INVALIDDATA;
1776  }
1777 
1778  if ((ret = ff_set_dimensions(avctx, s->w, s->h)) < 0)
1779  return ret;
1780 
1781  s->desc = av_pix_fmt_desc_get(avctx->pix_fmt);
1782  if (!s->desc)
1783  return AVERROR_INVALIDDATA;
1784 
1785  if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) {
1786  planes = s->desc->nb_components;
1787  out_line_size = avctx->width * 4;
1788  } else {
1789  planes = 1;
1790  out_line_size = avctx->width * 2 * s->desc->nb_components;
1791  }
1792 
1793  if (s->is_tile) {
1794  nb_blocks = ((s->xdelta + s->tile_attr.xSize - 1) / s->tile_attr.xSize) *
1795  ((s->ydelta + s->tile_attr.ySize - 1) / s->tile_attr.ySize);
1796  } else { /* scanline */
1797  nb_blocks = (s->ydelta + s->scan_lines_per_block - 1) /
1798  s->scan_lines_per_block;
1799  }
1800 
1801  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
1802  return ret;
1803 
1804  if (bytestream2_get_bytes_left(&s->gb)/8 < nb_blocks)
1805  return AVERROR_INVALIDDATA;
1806 
1807  // check offset table and recreate it if needed
1808  if (!s->is_tile && bytestream2_peek_le64(&s->gb) == 0) {
1809  av_log(s->avctx, AV_LOG_DEBUG, "recreating invalid scanline offset table\n");
1810 
1811  start_offset_table = bytestream2_tell(&s->gb);
1812  start_next_scanline = start_offset_table + nb_blocks * 8;
1813  bytestream2_init_writer(&offset_table_writer, &avpkt->data[start_offset_table], nb_blocks * 8);
1814 
1815  for (y = 0; y < nb_blocks; y++) {
1816  /* write offset of prev scanline in offset table */
1817  bytestream2_put_le64(&offset_table_writer, start_next_scanline);
1818 
1819  /* get len of next scanline */
1820  bytestream2_seek(&s->gb, start_next_scanline + 4, SEEK_SET);/* skip line number */
1821  start_next_scanline += (bytestream2_get_le32(&s->gb) + 8);
1822  }
1823  bytestream2_seek(&s->gb, start_offset_table, SEEK_SET);
1824  }
1825 
1826  // save pointer we are going to use in decode_block
1827  s->buf = avpkt->data;
1828  s->buf_size = avpkt->size;
1829 
1830  // Zero out the start if ymin is not 0
1831  for (i = 0; i < planes; i++) {
1832  ptr = picture->data[i];
1833  for (y = 0; y < s->ymin; y++) {
1834  memset(ptr, 0, out_line_size);
1835  ptr += picture->linesize[i];
1836  }
1837  }
1838 
1839  s->picture = picture;
1840 
1841  avctx->execute2(avctx, decode_block, s->thread_data, NULL, nb_blocks);
1842 
1843  ymax = FFMAX(0, s->ymax + 1);
1844  // Zero out the end if ymax+1 is not h
1845  for (i = 0; i < planes; i++) {
1846  ptr = picture->data[i] + (ymax * picture->linesize[i]);
1847  for (y = ymax; y < avctx->height; y++) {
1848  memset(ptr, 0, out_line_size);
1849  ptr += picture->linesize[i];
1850  }
1851  }
1852 
1853  picture->pict_type = AV_PICTURE_TYPE_I;
1854  *got_frame = 1;
1855 
1856  return avpkt->size;
1857 }
1858 
1859 static av_cold int decode_init(AVCodecContext *avctx)
1860 {
1861  EXRContext *s = avctx->priv_data;
1862  uint32_t i;
1863  union av_intfloat32 t;
1864  float one_gamma = 1.0f / s->gamma;
1865  avpriv_trc_function trc_func = NULL;
1866 
1867  s->avctx = avctx;
1868 
1869  ff_exrdsp_init(&s->dsp);
1870 
1871 #if HAVE_BIGENDIAN
1872  ff_bswapdsp_init(&s->bbdsp);
1873 #endif
1874 
1875  trc_func = avpriv_get_trc_function(s->apply_trc_type);
1876  if (trc_func) {
1877  for (i = 0; i < 65536; ++i) {
1878  t = exr_half2float(i);
1879  t.f = trc_func(t.f);
1880  s->gamma_table[i] = t;
1881  }
1882  } else {
1883  if (one_gamma > 0.9999f && one_gamma < 1.0001f) {
1884  for (i = 0; i < 65536; ++i) {
1885  s->gamma_table[i] = exr_half2float(i);
1886  }
1887  } else {
1888  for (i = 0; i < 65536; ++i) {
1889  t = exr_half2float(i);
1890  /* If negative value we reuse half value */
1891  if (t.f <= 0.0f) {
1892  s->gamma_table[i] = t;
1893  } else {
1894  t.f = powf(t.f, one_gamma);
1895  s->gamma_table[i] = t;
1896  }
1897  }
1898  }
1899  }
1900 
1901  // allocate thread data, used for non-EXR_RAW compression types
1902  s->thread_data = av_mallocz_array(avctx->thread_count, sizeof(EXRThreadData));
1903  if (!s->thread_data)
1904  return AVERROR_INVALIDDATA;
1905 
1906  return 0;
1907 }
1908 
1909 static av_cold int decode_end(AVCodecContext *avctx)
1910 {
1911  EXRContext *s = avctx->priv_data;
1912  int i;
1913  for (i = 0; i < avctx->thread_count; i++) {
1914  EXRThreadData *td = &s->thread_data[i];
1915  av_freep(&td->uncompressed_data);
1916  av_freep(&td->tmp);
1917  av_freep(&td->bitmap);
1918  av_freep(&td->lut);
1919  }
1920 
1921  av_freep(&s->thread_data);
1922  av_freep(&s->channels);
1923 
1924  return 0;
1925 }
1926 
1927 #define OFFSET(x) offsetof(EXRContext, x)
1928 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1929 static const AVOption options[] = {
1930  { "layer", "Set the decoding layer", OFFSET(layer),
1931  AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1932  { "gamma", "Set the float gamma value when decoding", OFFSET(gamma),
1933  AV_OPT_TYPE_FLOAT, { .dbl = 1.0f }, 0.001, FLT_MAX, VD },
1934 
1935  // XXX: Note the abuse of the enum using AVCOL_TRC_UNSPECIFIED to subsume the existing gamma option
1936  { "apply_trc", "color transfer characteristics to apply to EXR linear input", OFFSET(apply_trc_type),
1937  AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_UNSPECIFIED }, 1, AVCOL_TRC_NB-1, VD, "apply_trc_type"},
1938  { "bt709", "BT.709", 0,
1939  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1940  { "gamma", "gamma", 0,
1941  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_UNSPECIFIED }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1942  { "gamma22", "BT.470 M", 0,
1943  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA22 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1944  { "gamma28", "BT.470 BG", 0,
1945  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA28 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1946  { "smpte170m", "SMPTE 170 M", 0,
1947  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE170M }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1948  { "smpte240m", "SMPTE 240 M", 0,
1949  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE240M }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1950  { "linear", "Linear", 0,
1951  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LINEAR }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1952  { "log", "Log", 0,
1953  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1954  { "log_sqrt", "Log square root", 0,
1955  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG_SQRT }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1956  { "iec61966_2_4", "IEC 61966-2-4", 0,
1957  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_4 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1958  { "bt1361", "BT.1361", 0,
1959  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT1361_ECG }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1960  { "iec61966_2_1", "IEC 61966-2-1", 0,
1961  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1962  { "bt2020_10bit", "BT.2020 - 10 bit", 0,
1963  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1964  { "bt2020_12bit", "BT.2020 - 12 bit", 0,
1965  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1966  { "smpte2084", "SMPTE ST 2084", 0,
1967  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST2084 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1968  { "smpte428_1", "SMPTE ST 428-1", 0,
1969  AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST428_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"},
1970 
1971  { NULL },
1972 };
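As a usage note, the private options declared above can be passed as input options on the ffmpeg command line; the invocations below are illustrative only (file and layer names are made up):

/* Illustrative usage of the decoder's AVOptions:
 *
 *   ffmpeg -layer diffuse -i input.exr diffuse.png
 *   ffmpeg -apply_trc iec61966_2_1 -i linear.exr srgb.png
 *   ffmpeg -gamma 2.2 -i linear.exr graded.png
 */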
1973 
1974 static const AVClass exr_class = {
1975  .class_name = "EXR",
1976  .item_name = av_default_item_name,
1977  .option = options,
1978  .version = LIBAVUTIL_VERSION_INT,
1979 };
1980 
1981 AVCodec ff_exr_decoder = {
1982  .name = "exr",
1983  .long_name = NULL_IF_CONFIG_SMALL("OpenEXR image"),
1984  .type = AVMEDIA_TYPE_VIDEO,
1985  .id = AV_CODEC_ID_EXR,
1986  .priv_data_size = sizeof(EXRContext),
1987  .init = decode_init,
1988  .close = decode_end,
1989  .decode = decode_frame,
1990  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
1991  AV_CODEC_CAP_SLICE_THREADS,
1992  .priv_class = &exr_class,
1993 };
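A hedged end-to-end sketch (not part of exr.c) of how a caller might open the decoder registered above through the public libavcodec API and decode a single EXR packet. The function name, the "bt709" option value and the abbreviated error handling are assumptions for illustration; any option from the table above could be passed in the dictionary instead.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Decode one EXR packet with ff_exr_decoder (sketch, not library code). */
static int decode_one_exr(const AVPacket *pkt, AVFrame *out_frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_EXR);
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    /* Example value only; routed to the decoder's private options. */
    av_dict_set(&opts, "apply_trc", "bt709", 0);

    ret = avcodec_open2(ctx, codec, &opts);
    av_dict_free(&opts);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, out_frame);

    avcodec_free_context(&ctx);
    return ret;
}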