FFmpeg
tiff.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
40 #include "libavutil/avstring.h"
41 #include "libavutil/error.h"
42 #include "libavutil/intreadwrite.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/reverse.h"
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "codec_internal.h"
49 #include "decode.h"
50 #include "exif_internal.h"
51 #include "faxcompr.h"
52 #include "lzw.h"
53 #include "tiff.h"
54 #include "tiff_common.h"
55 #include "tiff_data.h"
56 #include "mjpegdec.h"
57 #include "thread.h"
58 #include "get_bits.h"
59 
/* Decoder state for one TIFF/DNG stream.
 * NOTE(review): this copy of the file is missing several fields of the
 * struct (e.g. the main AVCodecContext, GetByteContext, compression and
 * photometric tags referenced elsewhere in the file) — confirm against the
 * upstream source before relying on the layout. */
typedef struct TiffContext {
    AVClass *class;

    /* JPEG decoding for DNG */
    AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
    AVPacket *jpkt; // encoded JPEG tile
    AVFrame *jpgframe; // decoded JPEG tile

    uint16_t get_page;           // requested page/IFD — presumably from an AVOption; confirm

    int width, height;           // image dimensions in pixels
    unsigned int bpp, bppcount;  // total bits per pixel and component count
    uint32_t palette[256];       // palette for PAL8 output
    int le;                      // non-zero when the file is little-endian (passed to ff_tget_*)
    int planar;                  // non-zero for planar sample layout
    int subsampling[2];          // horizontal/vertical chroma subsampling factors
    int fax_opts;                // CCITT fax decoder options (forwarded to ff_ccitt_unpack)
    int predictor;               // TIFF predictor tag value — presumably; confirm at use site
    uint32_t res[4];             // resolution values — assumed numerator/denominator pairs; confirm
    unsigned last_tag;

    int is_bayer;                // non-zero when the image carries a Bayer CFA
    uint8_t pattern[4];          // CFA pattern bytes, read as AV_RL32() to pick the Bayer pixfmt

    float analog_balance[4];
    float as_shot_neutral[4];
    float as_shot_white[4];
    float color_matrix[3][4];
    float camera_calibration[4][4];
    float premultiply[4];        // per-channel gain applied in dng_blit() scale factors
    float black_level[4];        // per-CFA-position black level subtracted in dng_blit()

    unsigned white_level;        // sensor saturation value used to scale to full range
    uint16_t dng_lut[65536];     // DNG linearization table applied in dng_process_color16()

    uint32_t sub_ifd;
    uint16_t cur_page;

    int sot;

    /* Tile support */
    int is_tiled;

    int is_jpeg;                 // non-zero when strips/tiles are JPEG-compressed

    uint8_t *deinvert_buf;       // scratch buffer holding a bit-reversed copy (FillOrder)
    uint8_t *yuv_line;           // scratch line buffer for subsampled YUV / GRAY12 unpacking
    unsigned int yuv_line_size;

} TiffContext;
131 
/* CIE standard illuminant D65 white point in XYZ, normalized to Y = 1. */
static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
133 
134 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
135  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
136  s->tiff_type = tiff_type;
137 }
138 
139 static void free_geotags(TiffContext *const s)
140 {
141  for (int i = 0; i < s->geotag_count; i++)
142  av_freep(&s->geotags[i].val);
143  av_freep(&s->geotags);
144  s->geotag_count = 0;
145 }
146 
/* Return the human-readable name of a GeoTIFF key id, or NULL when the id
 * falls outside every known key range (vertical, projected, geographic,
 * configuration). Names are stored as offsets into packed string tables
 * declared in tiff_data.h. */
static const char *get_geokey_name(int key)
{
/* Range-check the id against one table and return the packed name string. */
#define RET_GEOKEY_STR(TYPE, array)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
        return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;

    RET_GEOKEY_STR(VERT, vert);
    RET_GEOKEY_STR(PROJ, proj);
    RET_GEOKEY_STR(GEOG, geog);
    RET_GEOKEY_STR(CONF, conf);

    return NULL;
}
161 
/* Return the TIFF value type (short/double/string) associated with a
 * GeoTIFF key id, or AVERROR_INVALIDDATA for an unknown key. */
static int get_geokey_type(int key)
{
/* Range-check the id against one table and return its declared type. */
#define RET_GEOKEY_TYPE(TYPE, array)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
        return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
    RET_GEOKEY_TYPE(VERT, vert);
    RET_GEOKEY_TYPE(PROJ, proj);
    RET_GEOKEY_TYPE(GEOG, geog);
    RET_GEOKEY_TYPE(CONF, conf);

    return AVERROR_INVALIDDATA;
}
175 
176 static int cmp_id_key(const void *id, const void *k)
177 {
178  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
179 }
180 
181 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
182 {
183  const TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
184  if(r)
185  return r->name;
186 
187  return NULL;
188 }
189 
/* Translate a GeoTIFF (key, value) pair into a human-readable string from
 * the code tables in tiff_data.h, or NULL when the value is unknown.
 * NOTE(review): this copy of the file appears to have lost lines here —
 * the two bare returns below originally sat behind `if (val == ...)`
 * guards, and the switch arms below lack their `case ..._GEOKEY:` labels.
 * Confirm against the upstream source before editing. */
static const char *get_geokey_val(int key, uint16_t val)
{
        return "undefined";
        return "User-Defined";

/* Range-check the value against one code table and return its string. */
#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
        return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];

    switch (key) {
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;

    }

    return NULL;
}
248 
/**
 * Join an array of doubles into one newly allocated string.
 *
 * @param dp    values to format ("%.15g" each)
 * @param count number of values in dp; must be > 0
 * @param sep   separator placed between values; ", " is used when NULL
 * @return av_malloc()ed string the caller must av_free(), or NULL on
 *         invalid count, size overflow, allocation or formatting failure
 */
static char *doubles2str(double *dp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    uint64_t component_len;
    if (!sep) sep = ", ";
    /* Worst-case printed width of one value plus its separator. */
    component_len = 24LL + strlen(sep);
    /* count <= 0 must be rejected: the separator-trimming write at the end
     * would otherwise index before the start of the buffer. The upper bound
     * guards the allocation size computation against overflow. */
    if (count <= 0 || count >= (INT_MAX - 1) / component_len)
        return NULL;
    ap = av_malloc(component_len * count + 1);
    if (!ap)
        return NULL;
    ap0   = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
        if (l >= component_len) {
            av_free(ap0);
            return NULL;
        }
        ap += l;
    }
    /* Drop the trailing separator written by the last iteration. */
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}
274 
275 static int add_metadata(int count, int type,
276  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
277 {
278  switch(type) {
279  case AV_TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
280  case AV_TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
281  case AV_TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
282  default : return AVERROR_INVALIDDATA;
283  };
284 }
285 
286 /**
287  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
288  */
289 static uint16_t av_always_inline dng_process_color16(uint16_t value,
290  const uint16_t *lut,
291  float black_level,
292  float scale_factor)
293 {
294  float value_norm;
295 
296  // Lookup table lookup
297  value = lut[value];
298 
299  // Black level subtraction
300  // Color scaling
301  value_norm = ((float)value - black_level) * scale_factor;
302 
303  value = av_clip_uint16(lrintf(value_norm));
304 
305  return value;
306 }
307 
/* 8-bit variant: run the full 16-bit processing, then keep the high byte. */
static uint16_t av_always_inline dng_process_color8(uint16_t value,
                                                    const uint16_t *lut,
                                                    float black_level,
                                                    float scale_factor)
{
    return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
}
315 
/* Copy a width x height rectangle of DNG sensor data from src to dst,
 * linearizing every sample via dng_process_color16()/8(). The per-position
 * black level and scale factor are selected by (col&1, line&1) so each CFA
 * cell gets its own calibration; odd_line offsets the row phase for strips
 * that start on an odd image line. Strides are in pixels, not bytes. */
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
                                      const uint8_t *src, int src_stride, int width, int height,
                                      int is_single_comp, int is_u16, int odd_line)
{
    float scale_factor[4];
    int line, col;

    /* Per-CFA-position gain: premultiply times full-range scaling from
     * [black_level, white_level] up to 16 bits. */
    if (s->is_bayer) {
        for (int i = 0; i < 4; i++)
            scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
    } else {
        for (int i = 0; i < 4; i++)
            scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
    }

    if (is_single_comp) {
        if (!is_u16)
            return; /* <= 8bpp unsupported */

        /* Image is double the width and half the height we need, each row comprises 2 rows of the output
           (split vertically in the middle). */
        for (line = 0; line < height / 2; line++) {
            uint16_t *dst_u16 = (uint16_t *)dst;
            const uint16_t *src_u16 = (const uint16_t *)src;

            /* Blit first half of input row row to initial row of output */
            for (col = 0; col < width; col++)
                *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);

            /* Advance the destination pointer by a row (source pointer remains in the same place) */
            dst += dst_stride * sizeof(uint16_t);
            dst_u16 = (uint16_t *)dst;

            /* Blit second half of input row row to next row of output */
            for (col = 0; col < width; col++)
                *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);

            dst += dst_stride * sizeof(uint16_t);
            src += src_stride * sizeof(uint16_t);
        }
    } else {
        /* Input and output image are the same size and the MJpeg decoder has done per-component
           deinterleaving, so blitting here is straightforward. */
        if (is_u16) {
            for (line = 0; line < height; line++) {
                uint16_t *dst_u16 = (uint16_t *)dst;
                const uint16_t *src_u16 = (const uint16_t *)src;

                for (col = 0; col < width; col++)
                    *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
                                                     s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
                                                     scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);

                dst += dst_stride * sizeof(uint16_t);
                src += src_stride * sizeof(uint16_t);
            }
        } else {
            for (line = 0; line < height; line++) {
                uint8_t *dst_u8 = dst;
                const uint8_t *src_u8 = src;

                for (col = 0; col < width; col++)
                    *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
                                                   s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
                                                   scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);

                dst += dst_stride;
                src += src_stride;
            }
        }
    }
}
388 
/* NOTE(review): the first line of this definition (presumably
 * "static void horizontal_fill(TiffContext *s," by the parameter list and
 * call sites below) was lost in this copy of the file — confirm upstream.
 * Expands one row of packed samples into one byte (or uint16) per sample:
 * sub-byte depths (1/2/4 bpp) are unpacked MSB-first, 10/12/14 bpp are read
 * through a bit reader, everything else is copied/filled verbatim. When
 * usePtr is zero the constant byte c is used instead of reading src. */
                           unsigned int bpp, uint8_t* dst,
                           int usePtr, const uint8_t *src,
                           uint8_t c, int width, int offset)
{
    switch (bpp) {
    case 1:
        /* 8 one-bit pixels per source byte, most significant bit first. */
        while (--width >= 0) {
            dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
            dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
            dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
            dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
            dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
            dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
            dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
            dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
        }
        break;
    case 2:
        /* 4 two-bit pixels per source byte. */
        while (--width >= 0) {
            dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
            dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
            dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
            dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
        }
        break;
    case 4:
        /* 2 four-bit pixels per source byte. */
        while (--width >= 0) {
            dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
            dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
        }
        break;
    case 10:
    case 12:
    case 14: {
        /* Bit-packed 10/12/14-bit samples, expanded to 16-bit words.
         * DNG keeps raw values (shift 0); plain TIFF left-aligns them. */
        uint16_t *dst16 = (uint16_t *)dst;
        int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
        uint8_t shift = is_dng ? 0 : 16 - bpp;
        GetBitContext gb;

        av_unused int ret = init_get_bits8(&gb, src, width);
        av_assert1(ret >= 0);
        for (int i = 0; i < s->width; i++) {
            dst16[i] = get_bits(&gb, bpp) << shift;
        }
    }
        break;
    default:
        if (usePtr) {
            memcpy(dst + offset, src, width);
        } else {
            memset(dst + offset, c, width);
        }
    }
}
444 
445 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
446 {
447  int i;
448 
449  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
450  if (!s->deinvert_buf)
451  return AVERROR(ENOMEM);
452  for (i = 0; i < size; i++)
453  s->deinvert_buf[i] = ff_reverse[src[i]];
454 
455  return 0;
456 }
457 
/* NOTE(review): the first line of this definition (presumably
 * "static void unpack_gray(TiffContext *s, AVFrame *p," judging by the call
 * sites) was lost in this copy of the file — confirm upstream.
 * Reads s->width bit-packed samples of bpp bits each from src and stores
 * them as 16-bit values into row lnum of the frame's first plane. */
                        const uint8_t *src, int lnum, int width, int bpp)
{
    GetBitContext gb;
    uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);

    av_unused int ret = init_get_bits8(&gb, src, width);
    av_assert1(ret >= 0);

    for (int i = 0; i < s->width; i++) {
        dst[i] = get_bits(&gb, bpp);
    }
}
471 
/* NOTE(review): the first line of this definition (presumably
 * "static void unpack_yuv(TiffContext *s, AVFrame *p," judging by the call
 * sites) was lost in this copy of the file — confirm upstream.
 * De-interleaves one TIFF YCbCr macro-row: each unit holds
 * subsampling[0] x subsampling[1] luma samples followed by one U and one V
 * sample. The slow path clamps coordinates when the image size is not a
 * multiple of the subsampling factors. */
                       const uint8_t *src, int lnum)
{
    int i, j, k;
    int w = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
        /* Edge-clamped path: repeat the last row/column into the frame. */
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
                               FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    }else{
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][(lnum + j) * p->linesize[0] +
                               i * s->subsampling[0] + k] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    }
}
499 
500 #if CONFIG_ZLIB
/* One-shot zlib inflate of a DEFLATE-compressed strip.
 * On entry *len is the capacity of dst; on return it holds the number of
 * bytes actually produced. Returns Z_OK on a complete stream, otherwise
 * the zlib error/status code. */
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
                           int size)
{
    z_stream zstream = { 0 };
    int zret = inflateInit(&zstream);

    if (zret != Z_OK) {
        av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return zret;
    }

    zstream.next_in   = src;
    zstream.avail_in  = size;
    zstream.next_out  = dst;
    zstream.avail_out = *len;

    zret = inflate(&zstream, Z_SYNC_FLUSH);
    inflateEnd(&zstream);
    *len = zstream.total_out;

    if (zret == Z_STREAM_END)
        return Z_OK;
    return zret;
}
521 
/* Decode a DEFLATE-compressed strip: optionally bit-reverse the input
 * (FillOrder), inflate it into a temporary buffer, then copy it to the
 * destination line by line, expanding sub-byte palette samples and
 * de-interleaving subsampled YUV rows as needed.
 * width is in bytes per uncompressed row; returns 0 or a negative error. */
static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint8_t *zbuf;
    unsigned long outlen;
    int ret, line;
    outlen = width * lines;
    zbuf = av_malloc(outlen);
    if (!zbuf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(zbuf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress(zbuf, &outlen, src, size);
    if (ret != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
               (unsigned long)width * lines, ret);
        av_free(zbuf);
        return AVERROR_UNKNOWN;
    }
    src = zbuf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            /* Expand packed 1/2/4-bit palette indices to one byte each. */
            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            /* One YUV macro-row covers subsampling[1] image lines. */
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(zbuf);
    return 0;
}
565 #endif
566 
567 #if CONFIG_LZMA
/* One-shot liblzma decode of an LZMA-compressed strip.
 * On entry *len is the capacity of dst; on return it holds the number of
 * bytes actually produced. Returns LZMA_OK on a complete stream, otherwise
 * the lzma_ret error/status code. */
static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
                                int size)
{
    lzma_stream stream = LZMA_STREAM_INIT;
    lzma_ret ret;

    stream.next_in = src;
    stream.avail_in = size;
    stream.next_out = dst;
    stream.avail_out = *len;
    ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
    if (ret != LZMA_OK) {
        av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
        return ret;
    }
    ret = lzma_code(&stream, LZMA_RUN);
    lzma_end(&stream);
    *len = stream.total_out;
    return ret == LZMA_STREAM_END ? LZMA_OK : ret;
}
588 
/* Decode an LZMA-compressed strip; mirrors tiff_unpack_zlib(): optional
 * FillOrder bit-reversal, decompress into a temporary buffer, then copy
 * line by line with palette expansion / YUV de-interleaving.
 * width is in bytes per uncompressed row; returns 0 or a negative error. */
static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint64_t outlen = width * (uint64_t)lines;
    int ret, line;
    uint8_t *buf = av_malloc(outlen);
    if (!buf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(buf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress_lzma(buf, &outlen, src, size);
    if (ret != LZMA_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
               (uint64_t)width * lines, ret);
        av_free(buf);
        return AVERROR_UNKNOWN;
    }
    src = buf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            /* Expand packed 1/2/4-bit palette indices to one byte each. */
            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            /* One YUV macro-row covers subsampling[1] image lines. */
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(buf);
    return 0;
}
630 #endif
631 
632 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
633  const uint8_t *src, int size, int width, int lines)
634 {
635  int line;
636  int ret;
637 
638  if (s->fill_order) {
639  if ((ret = deinvert_buffer(s, src, size)) < 0)
640  return ret;
641  src = s->deinvert_buf;
642  }
643  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
644  s->compr, s->fax_opts);
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  for (line = 0; line < lines; line++) {
647  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
648  dst += stride;
649  }
650  return ret;
651 }
652 
/* NOTE(review): the first line of this definition (presumably
 * "static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,"
 * judging by the call sites) was lost in this copy of the file — confirm
 * upstream.
 * Decodes one JPEG-compressed DNG tile/strip of tile_byte_count bytes at
 * the current bytestream position through the wrapped MJPEG decoder, then
 * blits the linearized result into frame at (dst_x, dst_y) with size w x h.
 * Returns 0 on success (or on a skipped JPEG error without AV_EF_EXPLODE),
 * negative error otherwise. */
                           int tile_byte_count, int dst_x, int dst_y, int w, int h)
{
    TiffContext *s = avctx->priv_data;
    uint8_t *dst_data, *src_data;
    uint32_t dst_offset; /* offset from dst buffer in pixels */
    int is_single_comp, is_u16, pixel_size;
    int ret;

    if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
        return AVERROR_INVALIDDATA;

    /* Prepare a packet and send to the MJPEG decoder */
    av_packet_unref(s->jpkt);
    s->jpkt->data = (uint8_t*)s->gb.buffer;
    s->jpkt->size = tile_byte_count;

    if (s->is_bayer) {
        MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
        /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
           image or not from its own data (and we need that information when decoding it). */
        mjpegdecctx->bayer = 1;
    }

    ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
        return ret;
    }

    ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));

        /* Normally skip, error if explode */
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;
        else
            return 0;
    }

    is_u16 = (s->bpp > 8);

    /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */

    /* Reject frames whose geometry/format disagrees with the wrapper
     * context — a mismatch would make the pointer math below invalid. */
    if (s->jpgframe->width  != s->avctx_mjpeg->width  ||
        s->jpgframe->height != s->avctx_mjpeg->height ||
        s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
        return AVERROR_INVALIDDATA;

    /* See dng_blit for explanation */
    if (s->avctx_mjpeg->width  == w * 2 &&
        s->avctx_mjpeg->height == h / 2 &&
        s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
        is_single_comp = 1;
    } else if (s->avctx_mjpeg->width  >= w &&
               s->avctx_mjpeg->height >= h &&
               s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
              ) {
        is_single_comp = 0;
    } else
        return AVERROR_INVALIDDATA;

    pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));

    if (is_single_comp && !is_u16) {
        av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
        av_frame_unref(s->jpgframe);
        return AVERROR_PATCHWELCOME;
    }

    dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
    dst_data = frame->data[0] + dst_offset * pixel_size;
    src_data = s->jpgframe->data[0];

    dng_blit(s,
             dst_data,
             frame->linesize[0] / pixel_size,
             src_data,
             s->jpgframe->linesize[0] / pixel_size,
             w,
             h,
             is_single_comp,
             is_u16, 0);

    av_frame_unref(s->jpgframe);

    return 0;
}
742 
/* Decompress and unpack one TIFF strip into the picture.
 * Dispatches on s->compr: DEFLATE and LZMA go to their dedicated helpers,
 * LZW and CCITT fax are handled here via their decoders, JPEG-in-DNG is
 * forwarded to dng_decode_jpeg(), and RAW/PackBits are unpacked inline.
 * For subsampled YUV and GRAY12 the data is staged through s->yuv_line and
 * re-packed per macro-row (stride is forced to 0 in that case).
 * width is in bytes per packed row; returns 0 or a negative error code. */
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                             const uint8_t *src, int size, int strip_start, int lines)
{
    PutByteContext pb;
    int c, line, pixels, code, ret;
    const uint8_t *ssrc = src;
    int width = ((s->width * s->bpp) + 7) >> 3;   // bytes per packed row
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
    int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
                 (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
                 desc->nb_components >= 3;
    int is_dng;

    if (s->planar)
        width /= s->bppcount;

    if (size <= 0)
        return AVERROR_INVALIDDATA;

    if (is_yuv) {
        /* Stage one macro-row (luma block + U + V per unit) in yuv_line. */
        int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                             s->subsampling[0] * s->subsampling[1] + 7) >> 3;
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst = s->yuv_line;
        stride = 0;

        width = (s->width - 1) / s->subsampling[0] + 1;
        width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
        av_assert0(width <= bytes_per_row);
        av_assert0(s->bpp == 24);
    }
    if (s->is_bayer) {
        av_assert0(width == (s->bpp * s->width + 7) >> 3);
    }
    av_assert0(!(s->is_bayer && is_yuv));
    if (p->format == AV_PIX_FMT_GRAY12) {
        /* GRAY12 is also staged through yuv_line, then bit-unpacked. */
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst = s->yuv_line;
        stride = 0;
    }

    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
#if CONFIG_ZLIB
        return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "zlib support not enabled, "
               "deflate compression not supported\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZMA) {
#if CONFIG_LZMA
        return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "LZMA support not enabled\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZW) {
        if (s->fill_order) {
            if ((ret = deinvert_buffer(s, src, size)) < 0)
                return ret;
            ssrc = src = s->deinvert_buf;
        }
        /* Old-style LZW is detected but deliberately decoded anyway. */
        if (size > 1 && !src[0] && (src[1]&1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
        }
        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
            return ret;
        }
        for (line = 0; line < lines; line++) {
            pixels = ff_lzw_decode(s->lzw, dst, width);
            if (pixels < width) {
                av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                       pixels, width);
                return AVERROR_INVALIDDATA;
            }
            if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
                horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
            if (is_yuv) {
                unpack_yuv(s, p, dst, strip_start + line);
                line += s->subsampling[1] - 1;
            } else if (p->format == AV_PIX_FMT_GRAY12) {
                unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
            }
            dst += stride;
        }
        return 0;
    }
    if (s->compr == TIFF_CCITT_RLE ||
        s->compr == TIFF_G3 ||
        s->compr == TIFF_G4) {
        if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
            return AVERROR_INVALIDDATA;

        return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
    }

    bytestream2_init(&s->gb, src, size);
    bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));

    is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);

    /* Decode JPEG-encoded DNGs with strips */
    if (s->compr == TIFF_NEWJPEG && is_dng) {
        if (s->strips > 1) {
            av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->is_bayer)
            return AVERROR_PATCHWELCOME;
        if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
            return ret;
        return 0;
    }

    if (is_dng && stride == 0)
        return AVERROR_INVALIDDATA;

    for (line = 0; line < lines; line++) {
        if (src - ssrc > size) {
            av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
            return AVERROR_INVALIDDATA;
        }

        if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
            break;
        bytestream2_seek_p(&pb, stride * line, SEEK_SET);
        switch (s->compr) {
        case TIFF_RAW:
            if (ssrc + size - src < width)
                return AVERROR_INVALIDDATA;

            if (!s->fill_order) {
                horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
                                dst, 1, src, 0, width, 0);
            } else {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[src[i]];
            }

            /* Color processing for DNG images with uncompressed strips (non-tiled) */
            if (is_dng) {
                int is_u16, pixel_size_bytes, pixel_size_bits, elements;

                is_u16 = (s->bpp / s->bppcount > 8);
                pixel_size_bits = (is_u16 ? 16 : 8);
                pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));

                elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
                av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
                dng_blit(s,
                         dst,
                         0, // no stride, only 1 line
                         dst,
                         0, // no stride, only 1 line
                         elements,
                         1,
                         0, // single-component variation is only preset in JPEG-encoded DNGs
                         is_u16,
                         (line + strip_start)&1);
            }

            src += width;
            break;
        case TIFF_PACKBITS:
            /* PackBits RLE: a control byte n >= 0 copies n+1 literals,
             * n in [-127,-1] repeats the next byte 1-n times, -128 is a
             * no-op. fill_order additionally bit-reverses every byte. */
            for (pixels = 0; pixels < width;) {
                if (ssrc + size - src < 2) {
                    av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
                    return AVERROR_INVALIDDATA;
                }
                code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
                if (code >= 0) {
                    code++;
                    if (pixels + code > width ||
                        ssrc + size - src < code) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Copy went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 1, src, 0, code, pixels);
                    src += code;
                    pixels += code;
                } else if (code != -128) { // -127..-1
                    code = (-code) + 1;
                    if (pixels + code > width) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Run went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    c = *src++;
                    horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 0, NULL, c, code, pixels);
                    pixels += code;
                }
            }
            if (s->fill_order) {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[dst[i]];
            }
            break;
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        } else if (p->format == AV_PIX_FMT_GRAY12) {
            unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
        }
        dst += stride;
    }
    return 0;
}
971 
/* NOTE(review): the first line of this definition (presumably
 * "static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,"
 * judging by the body) was lost in this copy of the file — confirm
 * upstream.
 * Iterates over the tile grid of a tiled DNG, reading each tile's offset
 * and byte count from the offset/byte-count tables, and decodes every tile
 * through dng_decode_jpeg() into its position in the frame. Edge tiles are
 * clipped to the leftover width/height. Returns avpkt->size on success or
 * a negative error code. */
                            const AVPacket *avpkt)
{
    TiffContext *s = avctx->priv_data;
    int tile_idx;
    int tile_offset_offset, tile_offset;
    int tile_byte_count_offset, tile_byte_count;
    int tile_count_x, tile_count_y;
    int tile_width, tile_length;
    int has_width_leftover, has_height_leftover;
    int tile_x = 0, tile_y = 0;
    int pos_x = 0, pos_y = 0;
    int ret;

    if (s->tile_width <= 0 || s->tile_length <= 0)
        return AVERROR_INVALIDDATA;

    has_width_leftover = (s->width % s->tile_width != 0);
    has_height_leftover = (s->height % s->tile_length != 0);

    /* Calculate tile counts (round up) */
    tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
    tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;

    /* Iterate over the number of tiles */
    for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
        tile_x = tile_idx % tile_count_x;
        tile_y = tile_idx / tile_count_x;

        if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
            tile_width = s->width % s->tile_width;
        else
            tile_width = s->tile_width;

        if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
            tile_length = s->height % s->tile_length;
        else
            tile_length = s->tile_length;

        /* Read tile offset */
        tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
        bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
        tile_offset = ff_tget_long(&s->gb, s->le);

        /* Read tile byte size */
        tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
        bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
        tile_byte_count = ff_tget_long(&s->gb, s->le);

        /* Seek to tile data */
        bytestream2_seek(&s->gb, tile_offset, SEEK_SET);

        /* Decode JPEG tile and copy it in the reference frame */
        ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);

        if (ret < 0)
            return ret;

        /* Advance current positions */
        pos_x += tile_width;
        if (tile_x == tile_count_x - 1) { // If on the right edge
            pos_x = 0;
            pos_y += tile_length;
        }
    }

    /* Frame is ready to be output */
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->flags |= AV_FRAME_FLAG_KEY;

    return avpkt->size;
}
1044 
/* init_image body.
 * NOTE(review): the opening line of this definition (its name and
 * parameter list, doxygen line 1045) is missing from this extraction;
 * by convention in tiff.c this is init_image(TiffContext *s,
 * AVFrame *frame) -- confirm against the complete source.
 *
 * Maps the accumulated tag state (bpp, bppcount, planar, is_bayer,
 * photometric, subsampling) to an AVPixelFormat, applies the stream
 * dimensions, and obtains a frame buffer.  For PAL8 output the palette
 * plane is filled either from the file's palette or with a synthetic
 * grayscale ramp.  Returns 1 when a buffer was allocated, 0 when the
 * frame is being skipped (skip_frame >= AVDISCARD_ALL), negative
 * AVERROR on failure. */
1046 {
1047  int ret;
1048  int create_gray_palette = 0;
1049 
1050  // make sure there is no aliasing in the following switch
1051  if (s->bpp > 128 || s->bppcount >= 10) {
1052  av_log(s->avctx, AV_LOG_ERROR,
1053  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1054  s->bpp, s->bppcount);
1055  return AVERROR_INVALIDDATA;
1056  }
1057 
 /* Switch key encodes all format-selecting fields in disjoint decimal
  * digits: is_bayer*100000 + planar*10000 + bpp*10 + bppcount.
  * E.g. 243 = bpp 24, 3 components (packed RGB/YCbCr). */
1058  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1059  case 11:
1060  if (!s->palette_is_set) {
1061  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1062  break;
1063  }
 /* NOTE(review): one line (doxygen 1064) is missing from the
  * extraction here; 1-bpp input with a palette falls through to the
  * PAL8 cases below -- confirm against the complete source. */
1065  case 21:
1066  case 41:
1067  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1068  if (!s->palette_is_set) {
1069  create_gray_palette = 1;
1070  }
1071  break;
1072  case 81:
1073  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1074  break;
1075  case 121:
1076  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1077  break;
 /* 8-bit Bayer mosaic; the CFA pattern bytes select the variant. */
1078  case 100081:
1079  switch (AV_RL32(s->pattern)) {
1080  case 0x02010100:
1081  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1082  break;
1083  case 0x00010102:
1084  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1085  break;
1086  case 0x01000201:
1087  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1088  break;
1089  case 0x01020001:
1090  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1091  break;
1092  default:
1093  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1094  AV_RL32(s->pattern));
1095  return AVERROR_PATCHWELCOME;
1096  }
1097  break;
 /* 10/12/14/16-bit Bayer mosaics all decode into 16-bit Bayer formats. */
1098  case 100101:
1099  case 100121:
1100  case 100141:
1101  case 100161:
1102  switch (AV_RL32(s->pattern)) {
1103  case 0x02010100:
1104  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1105  break;
1106  case 0x00010102:
1107  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1108  break;
1109  case 0x01000201:
1110  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1111  break;
1112  case 0x01020001:
1113  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1114  break;
1115  default:
1116  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1117  AV_RL32(s->pattern));
1118  return AVERROR_PATCHWELCOME;
1119  }
1120  break;
 /* 24-bit, 3 components: either subsampled planar YCbCr or packed RGB. */
1121  case 243:
1122  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1123  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1124  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1125  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1126  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1127  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1128  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1129  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1130  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1131  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1132  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1133  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1134  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1135  } else {
1136  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1137  return AVERROR_PATCHWELCOME;
1138  }
1139  } else
1140  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1141  break;
1142  case 161:
1143  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1144  break;
1145  case 162:
1146  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1147  break;
1148  case 322:
1149  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1150  break;
 /* 32-bit, 4 components: CMYK-style "separated" data is mapped to RGB0
  * (alpha channel ignored), otherwise straight RGBA. */
1151  case 324:
1152  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1153  break;
1154  case 405:
1155  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1156  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1157  else {
1158  av_log(s->avctx, AV_LOG_ERROR,
1159  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1160  return AVERROR_PATCHWELCOME;
1161  }
1162  break;
1163  case 483:
1164  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1165  break;
1166  case 644:
1167  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1168  break;
 /* 1xxxx cases: planar (separate sample planes) variants. */
1169  case 10243:
1170  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1171  break;
1172  case 10324:
1173  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1174  break;
1175  case 10483:
1176  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1177  break;
1178  case 10644:
1179  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1180  break;
 /* 32-bit float sample variants (96/128 bpp, packed and planar). */
1181  case 963:
1182  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1183  break;
1184  case 1284:
1185  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1186  break;
1187  case 10963:
1188  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1189  break;
1190  case 11284:
1191  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1192  break;
1193  default:
1194  av_log(s->avctx, AV_LOG_ERROR,
1195  "This format is not supported (bpp=%d, bppcount=%d)\n",
1196  s->bpp, s->bppcount);
1197  return AVERROR_INVALIDDATA;
1198  }
1199 
 /* YCbCr must have resolved to a planar non-RGB format with >= 3
  * components; anything else is a malformed combination. */
1200  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1201  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1202  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1203  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1204  desc->nb_components < 3) {
1205  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1206  return AVERROR_INVALIDDATA;
1207  }
1208  }
1209 
1210  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1211  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1212  if (ret < 0)
1213  return ret;
1214  }
1215 
 /* Caller asked to skip decoding entirely: report success without a buffer. */
1216  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1217  return 0;
1218 
1219  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1220  return ret;
1221  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1222  if (!create_gray_palette)
1223  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1224  else {
1225  /* make default grayscale pal */
1226  int i;
1227  uint32_t *pal = (uint32_t *)frame->data[1];
1228  for (i = 0; i < 1<<s->bpp; i++)
1229  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1230  }
1231  }
1232  return 1;
1233 }
1234 
1235 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1236 {
1237  int offset = tag == TIFF_YRES ? 2 : 0;
1238  s->res[offset++] = num;
1239  s->res[offset] = den;
1240  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1241  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1242  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1243  if (num > INT64_MAX || den > INT64_MAX) {
1244  num = num >> 1;
1245  den = den >> 1;
1246  }
1247  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1248  num, den, INT32_MAX);
1249  if (!s->avctx->sample_aspect_ratio.den)
1250  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1251  }
1252 }
1253 
1255 {
1256  AVFrameSideData *sd;
1257  GetByteContext gb_temp;
1258  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1259  int i, start;
1260  int pos;
1261  int ret;
1262  double *dp;
1263 
1264  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1265  if (ret < 0) {
1266  goto end;
1267  }
1268  if (tag <= s->last_tag)
1269  return AVERROR_INVALIDDATA;
1270 
1271  // We ignore TIFF_STRIP_SIZE as it is sometimes in the logic but wrong order around TIFF_STRIP_OFFS
1272  if (tag != TIFF_STRIP_SIZE)
1273  s->last_tag = tag;
1274 
1275  off = bytestream2_tell(&s->gb);
1276  if (count == 1) {
1277  switch (type) {
1278  case AV_TIFF_BYTE:
1279  case AV_TIFF_SHORT:
1280  case AV_TIFF_LONG:
1281  value = ff_tget(&s->gb, type, s->le);
1282  break;
1283  case AV_TIFF_RATIONAL:
1284  value = ff_tget_long(&s->gb, s->le);
1285  value2 = ff_tget_long(&s->gb, s->le);
1286  if (!value2) {
1287  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1288  value2 = 1;
1289  }
1290 
1291  break;
1292  case AV_TIFF_STRING:
1293  if (count <= 4) {
1294  break;
1295  }
1296  default:
1297  value = UINT_MAX;
1298  }
1299  }
1300 
1301  switch (tag) {
1302  case TIFF_SUBFILE:
1303  s->is_thumbnail = (value != 0);
1304  break;
1305  case TIFF_WIDTH:
1306  if (value > INT_MAX)
1307  return AVERROR_INVALIDDATA;
1308  s->width = value;
1309  break;
1310  case TIFF_HEIGHT:
1311  if (value > INT_MAX)
1312  return AVERROR_INVALIDDATA;
1313  s->height = value;
1314  break;
1315  case TIFF_BPP:
1316  if (count > 5 || count <= 0) {
1317  av_log(s->avctx, AV_LOG_ERROR,
1318  "This format is not supported (bpp=%d, %d components)\n",
1319  value, count);
1320  return AVERROR_INVALIDDATA;
1321  }
1322  s->bppcount = count;
1323  if (count == 1)
1324  s->bpp = value;
1325  else {
1326  switch (type) {
1327  case AV_TIFF_BYTE:
1328  case AV_TIFF_SHORT:
1329  case AV_TIFF_LONG:
1330  s->bpp = 0;
1331  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1332  return AVERROR_INVALIDDATA;
1333  for (i = 0; i < count; i++)
1334  s->bpp += ff_tget(&s->gb, type, s->le);
1335  break;
1336  default:
1337  s->bpp = -1;
1338  }
1339  }
1340  break;
1342  if (count != 1) {
1343  av_log(s->avctx, AV_LOG_ERROR,
1344  "Samples per pixel requires a single value, many provided\n");
1345  return AVERROR_INVALIDDATA;
1346  }
1347  if (value > 5 || value <= 0) {
1348  av_log(s->avctx, AV_LOG_ERROR,
1349  "Invalid samples per pixel %d\n", value);
1350  return AVERROR_INVALIDDATA;
1351  }
1352  if (s->bppcount == 1)
1353  s->bpp *= value;
1354  s->bppcount = value;
1355  break;
1356  case TIFF_COMPR:
1357  s->compr = value;
1358  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1359  s->predictor = 0;
1360  switch (s->compr) {
1361  case TIFF_RAW:
1362  case TIFF_PACKBITS:
1363  case TIFF_LZW:
1364  case TIFF_CCITT_RLE:
1365  break;
1366  case TIFF_G3:
1367  case TIFF_G4:
1368  s->fax_opts = 0;
1369  break;
1370  case TIFF_DEFLATE:
1371  case TIFF_ADOBE_DEFLATE:
1372 #if CONFIG_ZLIB
1373  break;
1374 #else
1375  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1376  return AVERROR(ENOSYS);
1377 #endif
1378  case TIFF_JPEG:
1379  case TIFF_NEWJPEG:
1380  s->is_jpeg = 1;
1381  break;
1382  case TIFF_LZMA:
1383 #if CONFIG_LZMA
1384  break;
1385 #else
1386  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1387  return AVERROR(ENOSYS);
1388 #endif
1389  default:
1390  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1391  s->compr);
1392  return AVERROR_INVALIDDATA;
1393  }
1394  break;
1395  case TIFF_ROWSPERSTRIP:
1396  if (!value || (type == AV_TIFF_LONG && value == UINT_MAX))
1397  value = s->height;
1398  s->rps = FFMIN(value, s->height);
1399  break;
1400  case TIFF_STRIP_OFFS:
1401  if (count == 1) {
1402  if (value > INT_MAX) {
1403  av_log(s->avctx, AV_LOG_ERROR,
1404  "strippos %u too large\n", value);
1405  return AVERROR_INVALIDDATA;
1406  }
1407  s->strippos = 0;
1408  s->stripoff = value;
1409  } else
1410  s->strippos = off;
1411  s->strips = count;
1412  if (s->strips == s->bppcount)
1413  s->rps = s->height;
1414  s->sot = type;
1415  break;
1416  case TIFF_STRIP_SIZE:
1417  if (count == 1) {
1418  if (value > INT_MAX) {
1419  av_log(s->avctx, AV_LOG_ERROR,
1420  "stripsize %u too large\n", value);
1421  return AVERROR_INVALIDDATA;
1422  }
1423  s->stripsizesoff = 0;
1424  s->stripsize = value;
1425  s->strips = 1;
1426  } else {
1427  s->stripsizesoff = off;
1428  }
1429  s->strips = count;
1430  s->sstype = type;
1431  break;
1432  case TIFF_XRES:
1433  case TIFF_YRES:
1434  set_sar(s, tag, value, value2);
1435  break;
1436  case TIFF_TILE_OFFSETS:
1437  s->tile_offsets_offset = off;
1438  s->is_tiled = 1;
1439  break;
1440  case TIFF_TILE_BYTE_COUNTS:
1441  s->tile_byte_counts_offset = off;
1442  break;
1443  case TIFF_TILE_LENGTH:
1444  if (value > INT_MAX)
1445  return AVERROR_INVALIDDATA;
1446  s->tile_length = value;
1447  break;
1448  case TIFF_TILE_WIDTH:
1449  if (value > INT_MAX)
1450  return AVERROR_INVALIDDATA;
1451  s->tile_width = value;
1452  break;
1453  case TIFF_PREDICTOR:
1454  if (value > INT_MAX)
1455  return AVERROR_INVALIDDATA;
1456  s->predictor = value;
1457  break;
1458  case TIFF_SUB_IFDS:
1459  if (count == 1)
1460  s->sub_ifd = value;
1461  else if (count > 1)
1462  s->sub_ifd = ff_tget_long(&s->gb, s->le); /** Only get the first SubIFD */
1463  break;
1466  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1467  return AVERROR_INVALIDDATA;
1468  for (int i = 0; i < count; i++)
1469  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1470  s->white_level = s->dng_lut[count-1];
1471  break;
1472  case DNG_BLACK_LEVEL:
1473  if (count > FF_ARRAY_ELEMS(s->black_level))
1474  return AVERROR_INVALIDDATA;
1475  s->black_level[0] = value / (float)value2;
1476  for (int i = 0; i < count && count > 1; i++) {
1477  if (type == AV_TIFF_RATIONAL) {
1478  value = ff_tget_long(&s->gb, s->le);
1479  value2 = ff_tget_long(&s->gb, s->le);
1480  if (!value2) {
1481  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1482  value2 = 1;
1483  }
1484 
1485  s->black_level[i] = value / (float)value2;
1486  } else if (type == AV_TIFF_SRATIONAL) {
1487  int value = ff_tget_long(&s->gb, s->le);
1488  int value2 = ff_tget_long(&s->gb, s->le);
1489  if (!value2) {
1490  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1491  value2 = 1;
1492  }
1493 
1494  s->black_level[i] = value / (float)value2;
1495  } else {
1496  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1497  }
1498  }
1499  for (int i = count; i < 4 && count > 0; i++)
1500  s->black_level[i] = s->black_level[count - 1];
1501  break;
1502  case DNG_WHITE_LEVEL:
1503  s->white_level = value;
1504  break;
1505  case TIFF_CFA_PATTERN_DIM:
1506  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1507  ff_tget(&s->gb, type, s->le) != 2)) {
1508  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1509  return AVERROR_INVALIDDATA;
1510  }
1511  break;
1512  case TIFF_CFA_PATTERN:
1513  s->is_bayer = 1;
1514  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1515  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1516  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1517  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1518  break;
1519  case TIFF_PHOTOMETRIC:
1520  switch (value) {
1523  case TIFF_PHOTOMETRIC_RGB:
1527  case TIFF_PHOTOMETRIC_CFA:
1528  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1529  s->photometric = value;
1530  break;
1538  "PhotometricInterpretation 0x%04X",
1539  value);
1540  return AVERROR_PATCHWELCOME;
1541  default:
1542  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1543  "unknown\n", value);
1544  return AVERROR_INVALIDDATA;
1545  }
1546  break;
1547  case TIFF_FILL_ORDER:
1548  if (value < 1 || value > 2) {
1549  av_log(s->avctx, AV_LOG_ERROR,
1550  "Unknown FillOrder value %d, trying default one\n", value);
1551  value = 1;
1552  }
1553  s->fill_order = value - 1;
1554  break;
1555  case TIFF_PAL: {
1556  GetByteContext pal_gb[3];
1557  off = type_sizes[type];
1558  if (count / 3 > 256 ||
1559  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1560  return AVERROR_INVALIDDATA;
1561 
1562  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1563  bytestream2_skip(&pal_gb[1], count / 3 * off);
1564  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1565 
1566  off = (type_sizes[type] - 1) << 3;
1567  if (off > 31U) {
1568  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1569  return AVERROR_INVALIDDATA;
1570  }
1571 
1572  for (i = 0; i < count / 3; i++) {
1573  uint32_t p = 0xFF000000;
1574  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1575  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1576  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1577  s->palette[i] = p;
1578  }
1579  s->palette_is_set = 1;
1580  break;
1581  }
1582  case TIFF_PLANAR:
1583  s->planar = value == 2;
1584  break;
1586  if (count != 2) {
1587  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1588  return AVERROR_INVALIDDATA;
1589  }
1590  for (i = 0; i < count; i++) {
1591  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1592  if (s->subsampling[i] <= 0) {
1593  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1594  s->subsampling[i] = 1;
1595  return AVERROR_INVALIDDATA;
1596  }
1597  }
1598  break;
1599  case TIFF_T4OPTIONS:
1600  if (s->compr == TIFF_G3) {
1601  if (value > INT_MAX)
1602  return AVERROR_INVALIDDATA;
1603  s->fax_opts = value;
1604  }
1605  break;
1606  case TIFF_T6OPTIONS:
1607  if (s->compr == TIFF_G4) {
1608  if (value > INT_MAX)
1609  return AVERROR_INVALIDDATA;
1610  s->fax_opts = value;
1611  }
1612  break;
1613 #define ADD_METADATA(count, name, sep)\
1614  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1615  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1616  goto end;\
1617  }
1619  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1620  break;
1622  ADD_METADATA(count, "ModelTransformationTag", NULL);
1623  break;
1624  case TIFF_MODEL_TIEPOINT:
1625  ADD_METADATA(count, "ModelTiepointTag", NULL);
1626  break;
1628  if (s->geotag_count) {
1629  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1630  return AVERROR_INVALIDDATA;
1631  }
1632  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1633  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1634  s->geotag_count = ff_tget_short(&s->gb, s->le);
1635  if (s->geotag_count > count / 4 - 1) {
1636  s->geotag_count = count / 4 - 1;
1637  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1638  }
1639  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1640  || s->geotag_count == 0) {
1641  s->geotag_count = 0;
1642  return -1;
1643  }
1644  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1645  if (!s->geotags) {
1646  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1647  s->geotag_count = 0;
1648  goto end;
1649  }
1650  for (i = 0; i < s->geotag_count; i++) {
1651  unsigned val;
1652  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1653  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1654  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1655  val = ff_tget_short(&s->gb, s->le);
1656 
1657  if (!s->geotags[i].type) {
1658  const char *str = get_geokey_val(s->geotags[i].key, val);
1659 
1660  s->geotags[i].val = str ? av_strdup(str) : av_asprintf("Unknown-%u", val);
1661  if (!s->geotags[i].val)
1662  return AVERROR(ENOMEM);
1663  } else
1664  s->geotags[i].offset = val;
1665  }
1666  break;
1668  if (count >= INT_MAX / sizeof(int64_t))
1669  return AVERROR_INVALIDDATA;
1670  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1671  return AVERROR_INVALIDDATA;
1672  dp = av_malloc_array(count, sizeof(double));
1673  if (!dp) {
1674  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1675  goto end;
1676  }
1677  for (i = 0; i < count; i++)
1678  dp[i] = ff_tget_double(&s->gb, s->le);
1679  for (i = 0; i < s->geotag_count; i++) {
1680  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1681  if (s->geotags[i].count == 0
1682  || s->geotags[i].offset + s->geotags[i].count > count) {
1683  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1684  } else if (s->geotags[i].val) {
1685  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1686  } else {
1687  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1688  if (!ap) {
1689  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1690  av_freep(&dp);
1691  return AVERROR(ENOMEM);
1692  }
1693  s->geotags[i].val = ap;
1694  }
1695  }
1696  }
1697  av_freep(&dp);
1698  break;
1699  case TIFF_GEO_ASCII_PARAMS:
1700  pos = bytestream2_tell(&s->gb);
1701  for (i = 0; i < s->geotag_count; i++) {
1702  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1703  if (s->geotags[i].count == 0
1704  || s->geotags[i].offset + s->geotags[i].count > count) {
1705  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1706  } else {
1707  char *ap;
1708 
1709  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1710  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1711  return AVERROR_INVALIDDATA;
1712  if (s->geotags[i].val)
1713  return AVERROR_INVALIDDATA;
1714  ap = av_malloc(s->geotags[i].count);
1715  if (!ap) {
1716  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1717  return AVERROR(ENOMEM);
1718  }
1719  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1720  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1721  s->geotags[i].val = ap;
1722  }
1723  }
1724  }
1725  break;
1726  case TIFF_ICC_PROFILE:
1727  gb_temp = s->gb;
1728  bytestream2_seek(&gb_temp, off, SEEK_SET);
1729 
1730  if (bytestream2_get_bytes_left(&gb_temp) < count)
1731  return AVERROR_INVALIDDATA;
1732 
1734  if (ret < 0)
1735  return ret;
1736  if (sd)
1737  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1738  break;
1739  case TIFF_ARTIST:
1740  ADD_METADATA(count, "artist", NULL);
1741  break;
1742  case TIFF_COPYRIGHT:
1743  ADD_METADATA(count, "copyright", NULL);
1744  break;
1745  case TIFF_DATE:
1746  ADD_METADATA(count, "date", NULL);
1747  break;
1748  case TIFF_DOCUMENT_NAME:
1749  ADD_METADATA(count, "document_name", NULL);
1750  break;
1751  case TIFF_HOST_COMPUTER:
1752  ADD_METADATA(count, "computer", NULL);
1753  break;
1755  ADD_METADATA(count, "description", NULL);
1756  break;
1757  case TIFF_MAKE:
1758  ADD_METADATA(count, "make", NULL);
1759  break;
1760  case TIFF_MODEL:
1761  ADD_METADATA(count, "model", NULL);
1762  break;
1763  case TIFF_PAGE_NAME:
1764  ADD_METADATA(count, "page_name", NULL);
1765  break;
1766  case TIFF_PAGE_NUMBER:
1767  ADD_METADATA(count, "page_number", " / ");
1768  // need to seek back to re-read the page number
1769  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1770  // read the page number
1771  s->cur_page = ff_tget_short(&s->gb, s->le);
1772  // get back to where we were before the previous seek
1773  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1774  break;
1775  case TIFF_SOFTWARE_NAME:
1776  ADD_METADATA(count, "software", NULL);
1777  break;
1778  case DNG_VERSION:
1779  if (count == 4) {
1780  unsigned int ver[4];
1781  ver[0] = ff_tget(&s->gb, type, s->le);
1782  ver[1] = ff_tget(&s->gb, type, s->le);
1783  ver[2] = ff_tget(&s->gb, type, s->le);
1784  ver[3] = ff_tget(&s->gb, type, s->le);
1785 
1786  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1787  ver[0], ver[1], ver[2], ver[3]);
1788 
1790  }
1791  break;
1792  case DNG_ANALOG_BALANCE:
1793  if (type != AV_TIFF_RATIONAL)
1794  break;
1795 
1796  for (int i = 0; i < 3; i++) {
1797  value = ff_tget_long(&s->gb, s->le);
1798  value2 = ff_tget_long(&s->gb, s->le);
1799  if (!value2) {
1800  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1801  value2 = 1;
1802  }
1803 
1804  s->analog_balance[i] = value / (float)value2;
1805  }
1806  break;
1807  case DNG_AS_SHOT_NEUTRAL:
1808  if (type != AV_TIFF_RATIONAL)
1809  break;
1810 
1811  for (int i = 0; i < 3; i++) {
1812  value = ff_tget_long(&s->gb, s->le);
1813  value2 = ff_tget_long(&s->gb, s->le);
1814  if (!value2) {
1815  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1816  value2 = 1;
1817  }
1818 
1819  s->as_shot_neutral[i] = value / (float)value2;
1820  }
1821  break;
1822  case DNG_AS_SHOT_WHITE_XY:
1823  if (type != AV_TIFF_RATIONAL)
1824  break;
1825 
1826  for (int i = 0; i < 2; i++) {
1827  value = ff_tget_long(&s->gb, s->le);
1828  value2 = ff_tget_long(&s->gb, s->le);
1829  if (!value2) {
1830  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1831  value2 = 1;
1832  }
1833 
1834  s->as_shot_white[i] = value / (float)value2;
1835  }
1836  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1837  for (int i = 0; i < 3; i++) {
1838  s->as_shot_white[i] /= d65_white[i];
1839  }
1840  break;
1841  case DNG_COLOR_MATRIX1:
1842  case DNG_COLOR_MATRIX2:
1843  for (int i = 0; i < 3; i++) {
1844  for (int j = 0; j < 3; j++) {
1845  int value = ff_tget_long(&s->gb, s->le);
1846  int value2 = ff_tget_long(&s->gb, s->le);
1847  if (!value2) {
1848  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1849  value2 = 1;
1850  }
1851  s->color_matrix[i][j] = value / (float)value2;
1852  }
1853  s->use_color_matrix = 1;
1854  }
1855  break;
1858  for (int i = 0; i < 3; i++) {
1859  for (int j = 0; j < 3; j++) {
1860  int value = ff_tget_long(&s->gb, s->le);
1861  int value2 = ff_tget_long(&s->gb, s->le);
1862  if (!value2) {
1863  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1864  value2 = 1;
1865  }
1866  s->camera_calibration[i][j] = value / (float)value2;
1867  }
1868  }
1869  break;
1870  case CINEMADNG_TIME_CODES:
1871  case CINEMADNG_FRAME_RATE:
1872  case CINEMADNG_T_STOP:
1873  case CINEMADNG_REEL_NAME:
1876  break;
1877  default:
1878  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1879  av_log(s->avctx, AV_LOG_ERROR,
1880  "Unknown or unsupported tag %d/0x%0X\n",
1881  tag, tag);
1882  return AVERROR_INVALIDDATA;
1883  }
1884  }
1885 end:
1886  if (s->bpp > 128U) {
1887  av_log(s->avctx, AV_LOG_ERROR,
1888  "This format is not supported (bpp=%d, %d components)\n",
1889  s->bpp, count);
1890  s->bpp = 0;
1891  return AVERROR_INVALIDDATA;
1892  }
1893  bytestream2_seek(&s->gb, start, SEEK_SET);
1894  return 0;
1895 }
1896 
/* Colour conversion matrix used by camera_xyz_coeff() below.
 * NOTE(review): the coefficients match the standard linear-sRGB -> CIE XYZ
 * (D65 white point) matrix despite the xyz2rgb name -- confirm the intended
 * direction against the DNG colour pipeline in the complete source. */
1897 static const float xyz2rgb[3][3] = {
1898  { 0.412453f, 0.357580f, 0.180423f },
1899  { 0.212671f, 0.715160f, 0.072169f },
1900  { 0.019334f, 0.119193f, 0.950227f },
1901 };
1902 
/* camera_xyz_coeff parameter tail and body.
 * NOTE(review): the opening line of this definition (its name and first
 * parameter, doxygen line 1903) is missing from this extraction; by
 * convention in tiff.c this is camera_xyz_coeff(TiffContext *s, ...) --
 * confirm against the complete source.
 *
 * Multiplies the camera->XYZ matrix by the xyz2rgb table to obtain a
 * camera->RGB matrix, normalizes each row to unit sum, and stores the
 * reciprocal of each row sum in s->premultiply[].  The rgb2cam output
 * parameter is never written in the visible span -- TODO confirm whether
 * lines are missing here as well. */
1904  float rgb2cam[3][4],
1905  double cam2xyz[4][3])
1906 {
1907  double cam2rgb[4][3], num;
1908  int i, j, k;
1909 
 /* cam2rgb = cam2xyz * xyz2rgb (3x3 sub-matrices only). */
1910  for (i = 0; i < 3; i++) {
1911  for (j = 0; j < 3; j++) {
1912  cam2rgb[i][j] = 0.;
1913  for (k = 0; k < 3; k++)
1914  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1915  }
1916  }
1917 
 /* Normalize each row to unit sum (guarding against a zero row) and keep
  * the reciprocal row sums as per-channel premultipliers. */
1918  for (i = 0; i < 3; i++) {
1919  for (num = j = 0; j < 3; j++)
1920  num += cam2rgb[i][j];
1921  if (!num)
1922  num = 1;
1923  for (j = 0; j < 3; j++)
1924  cam2rgb[i][j] /= num;
1925  s->premultiply[i] = 1.f / num;
1926  }
1927 }
1928 
1929 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1930  int *got_frame, AVPacket *avpkt)
1931 {
1932  TiffContext *const s = avctx->priv_data;
1933  unsigned off, last_off = 0;
1934  int le, ret, plane, planes;
1935  int i, j, entries, stride;
1936  unsigned soff, ssize;
1937  uint8_t *dst;
1938  GetByteContext stripsizes;
1939  GetByteContext stripdata;
1940  int retry_for_subifd, retry_for_page;
1941  int is_dng;
1942  int has_tile_bits, has_strip_bits;
1943 
1944  av_exif_free(&s->exif_meta);
1945  /* this will not parse the image data */
1946  ret = av_exif_parse_buffer(avctx, avpkt->data, avpkt->size, &s->exif_meta, AV_EXIF_TIFF_HEADER);
1947  if (ret < 0)
1948  av_log(avctx, AV_LOG_ERROR, "could not parse EXIF data: %s\n", av_err2str(ret));
1949 
1950  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1951 
1952  // parse image header
1953  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1954  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1955  return ret;
1956  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1957  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1958  return AVERROR_INVALIDDATA;
1959  }
1960  s->le = le;
1961  // TIFF_BPP is not a required tag and defaults to 1
1962 
1963  s->tiff_type = TIFF_TYPE_TIFF;
1964  s->use_color_matrix = 0;
1965 again:
1966  s->is_thumbnail = 0;
1967  s->bppcount = s->bpp = 1;
1968  s->photometric = TIFF_PHOTOMETRIC_NONE;
1969  s->compr = TIFF_RAW;
1970  s->fill_order = 0;
1971  s->white_level = 0;
1972  s->is_bayer = 0;
1973  s->is_tiled = 0;
1974  s->is_jpeg = 0;
1975  s->cur_page = 0;
1976  s->last_tag = 0;
1977 
1978  for (i = 0; i < 65536; i++)
1979  s->dng_lut[i] = i;
1980 
1981  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1982  s->black_level[i] = 0.f;
1983 
1984  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1985  s->as_shot_neutral[i] = 0.f;
1986 
1987  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1988  s->as_shot_white[i] = 1.f;
1989 
1990  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1991  s->analog_balance[i] = 1.f;
1992 
1993  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1994  s->premultiply[i] = 1.f;
1995 
1996  for (i = 0; i < 4; i++)
1997  for (j = 0; j < 4; j++)
1998  s->camera_calibration[i][j] = i == j;
1999 
2000  free_geotags(s);
2001 
2002  // Reset these offsets so we can tell if they were set this frame
2003  s->stripsizesoff = s->strippos = 0;
2004  /* parse image file directory */
2005  bytestream2_seek(&s->gb, off, SEEK_SET);
2006  entries = ff_tget_short(&s->gb, le);
2007  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
2008  return AVERROR_INVALIDDATA;
2009  for (i = 0; i < entries; i++) {
2010  if ((ret = tiff_decode_tag(s, p)) < 0)
2011  return ret;
2012  }
2013 
2014  if (s->get_thumbnail && !s->is_thumbnail) {
2015  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
2016  return AVERROR_EOF;
2017  }
2018 
2019  /** whether we should process this IFD's SubIFD */
2020  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
2021  /** whether we should process this multi-page IFD's next page */
2022  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
2023 
2024  if (retry_for_page) {
2025  // set offset to the next IFD
2026  off = ff_tget_long(&s->gb, le);
2027  } else if (retry_for_subifd) {
2028  // set offset to the SubIFD
2029  off = s->sub_ifd;
2030  }
2031 
2032  if (retry_for_subifd || retry_for_page) {
2033  if (!off) {
2034  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2035  return AVERROR_INVALIDDATA;
2036  }
2037  if (off <= last_off) {
2038  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2039  return AVERROR_INVALIDDATA;
2040  }
2041  last_off = off;
2042  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2043  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2044  return AVERROR_INVALIDDATA;
2045  }
2046  s->sub_ifd = 0;
2047  goto again;
2048  }
2049 
2050  /* At this point we've decided on which (Sub)IFD to process */
2051 
2052  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2053 
2054  for (i = 0; i<s->geotag_count; i++) {
2055  const char *keyname = get_geokey_name(s->geotags[i].key);
2056  if (!keyname) {
2057  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2058  continue;
2059  }
2060  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2061  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2062  continue;
2063  }
2064  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, AV_DICT_DONT_STRDUP_VAL);
2065  s->geotags[i].val = NULL;
2066  if (ret<0) {
2067  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2068  return ret;
2069  }
2070  }
2071 
2072  if (is_dng) {
2073  double cam2xyz[4][3];
2074  float cmatrix[3][4];
2075  float pmin = FLT_MAX;
2076  int bps;
2077 
2078  for (i = 0; i < 3; i++) {
2079  for (j = 0; j < 3; j++)
2080  s->camera_calibration[i][j] *= s->analog_balance[i];
2081  }
2082 
2083  if (!s->use_color_matrix) {
2084  for (i = 0; i < 3; i++) {
2085  if (s->camera_calibration[i][i])
2086  s->premultiply[i] /= s->camera_calibration[i][i];
2087  }
2088  } else {
2089  for (int c = 0; c < 3; c++) {
2090  for (i = 0; i < 3; i++) {
2091  cam2xyz[c][i] = 0.;
2092  for (j = 0; j < 3; j++)
2093  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2094  }
2095  }
2096 
2097  camera_xyz_coeff(s, cmatrix, cam2xyz);
2098  }
2099 
2100  for (int c = 0; c < 3; c++)
2101  pmin = fminf(pmin, s->premultiply[c]);
2102 
2103  for (int c = 0; c < 3; c++)
2104  s->premultiply[c] /= pmin;
2105 
2106  if (s->bpp % s->bppcount)
2107  return AVERROR_INVALIDDATA;
2108  bps = s->bpp / s->bppcount;
2109  if (bps < 8 || bps > 32)
2110  return AVERROR_INVALIDDATA;
2111 
2112  if (s->white_level == 0)
2113  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2114 
2115  if (s->white_level <= s->black_level[0]) {
2116  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2117  s->black_level[0], s->white_level);
2118  return AVERROR_INVALIDDATA;
2119  }
2120 
2121  if (s->planar)
2122  return AVERROR_PATCHWELCOME;
2123  }
2124 
2125  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2126  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2127  return AVERROR_INVALIDDATA;
2128  }
2129 
2130  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2131  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2132 
2133  if (has_tile_bits && has_strip_bits) {
2134  int tiled_dng = s->is_tiled && is_dng;
2135  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2136  if (!tiled_dng)
2137  return AVERROR_INVALIDDATA;
2138  }
2139 
2140  /* now we have the data and may start decoding */
2141  if ((ret = init_image(s, p)) <= 0)
2142  return ret;
2143 
2144  if (!s->is_tiled || has_strip_bits) {
2145  if (s->strips == 1 && !s->stripsize) {
2146  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2147  s->stripsize = avpkt->size - s->stripoff;
2148  }
2149 
2150  if (s->stripsizesoff) {
2151  if (s->stripsizesoff >= (unsigned)avpkt->size)
2152  return AVERROR_INVALIDDATA;
2153  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2154  avpkt->size - s->stripsizesoff);
2155  }
2156  if (s->strippos) {
2157  if (s->strippos >= (unsigned)avpkt->size)
2158  return AVERROR_INVALIDDATA;
2159  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2160  avpkt->size - s->strippos);
2161  }
2162 
2163  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2164  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2165  return AVERROR_INVALIDDATA;
2166  }
2167  }
2168 
2169  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2170  s->photometric == TIFF_PHOTOMETRIC_CFA) {
2171  p->color_trc = AVCOL_TRC_LINEAR;
2172  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
2173  p->color_trc = AVCOL_TRC_GAMMA22;
2174  }
2175 
2176  /* Handle DNG images with JPEG-compressed tiles */
2177 
2178  if (is_dng && s->is_tiled) {
2179  if (!s->is_jpeg) {
2180  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2181  return AVERROR_PATCHWELCOME;
2182  } else if (!s->is_bayer) {
2183  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2184  return AVERROR_PATCHWELCOME;
2185  } else {
2186  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2187  *got_frame = 1;
2188  return ret;
2189  }
2190  }
2191 
2192  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2193 
2194  planes = s->planar ? s->bppcount : 1;
2195  for (plane = 0; plane < planes; plane++) {
2196  uint8_t *five_planes = NULL;
2197  int remaining = avpkt->size;
2198  int decoded_height;
2199  stride = p->linesize[plane];
2200  dst = p->data[plane];
2201  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2202  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
2203  stride = stride * 5 / 4;
2204  five_planes =
2205  dst = av_malloc(stride * s->height);
2206  if (!dst)
2207  return AVERROR(ENOMEM);
2208  }
2209  for (i = 0; i < s->height; i += s->rps) {
2210  if (i)
2211  dst += s->rps * stride;
2212  if (s->stripsizesoff)
2213  ssize = ff_tget(&stripsizes, s->sstype, le);
2214  else
2215  ssize = s->stripsize;
2216 
2217  if (s->strippos)
2218  soff = ff_tget(&stripdata, s->sot, le);
2219  else
2220  soff = s->stripoff;
2221 
2222  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2223  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2224  av_freep(&five_planes);
2225  return AVERROR_INVALIDDATA;
2226  }
2227  remaining -= ssize;
2228  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2229  FFMIN(s->rps, s->height - i))) < 0) {
2230  if (avctx->err_recognition & AV_EF_EXPLODE) {
2231  av_freep(&five_planes);
2232  return ret;
2233  }
2234  break;
2235  }
2236  }
2237  decoded_height = FFMIN(i, s->height);
2238 
2239  if (s->predictor == 2) {
2240  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2241  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2242  return AVERROR_PATCHWELCOME;
2243  }
2244  dst = five_planes ? five_planes : p->data[plane];
2245  soff = s->bpp >> 3;
2246  if (s->planar)
2247  soff = FFMAX(soff / s->bppcount, 1);
2248  ssize = s->width * soff;
2249  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2250  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2251  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2252  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2253  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2254  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2255  for (i = 0; i < decoded_height; i++) {
2256  for (j = soff; j < ssize; j += 2)
2257  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2258  dst += stride;
2259  }
2260  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2261  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2262  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2263  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2264  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2265  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2266  for (i = 0; i < decoded_height; i++) {
2267  for (j = soff; j < ssize; j += 2)
2268  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2269  dst += stride;
2270  }
2271  } else {
2272  for (i = 0; i < decoded_height; i++) {
2273  for (j = soff; j < ssize; j++)
2274  dst[j] += dst[j - soff];
2275  dst += stride;
2276  }
2277  }
2278  }
2279 
2280  /* Floating point predictor
2281  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2282  if (s->predictor == 3) {
2283  int channels = s->bppcount;
2284  int group_size;
2285  uint8_t *tmpbuf;
2286  int bpc;
2287 
2288  dst = five_planes ? five_planes : p->data[plane];
2289  soff = s->bpp >> 3;
2290  if (s->planar) {
2291  soff = FFMAX(soff / s->bppcount, 1);
2292  channels = 1;
2293  }
2294  ssize = s->width * soff;
2295  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2296  group_size = s->width * channels;
2297 
2298  tmpbuf = av_malloc(ssize);
2299  if (!tmpbuf) {
2300  av_free(five_planes);
2301  return AVERROR(ENOMEM);
2302  }
2303 
2304  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2305  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2306  for (i = 0; i < decoded_height; i++) {
2307  /* Copy first sample byte for each channel */
2308  for (j = 0; j < channels; j++)
2309  tmpbuf[j] = dst[j];
2310 
2311  /* Decode horizontal differences */
2312  for (j = channels; j < ssize; j++)
2313  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2314 
2315  /* Combine shuffled bytes from their separate groups. Each
2316  byte of every floating point value in a row of pixels is
2317  split and combined into separate groups. A group of all
2318  the sign/exponents bytes in the row and groups for each
2319  of the upper, mid, and lower mantissa bytes in the row. */
2320  for (j = 0; j < group_size; j++) {
2321  for (int k = 0; k < bpc; k++) {
2322  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2323  }
2324  }
2325  dst += stride;
2326  }
2327  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2328  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2329  /* Same as LE only the shuffle at the end is reversed */
2330  for (i = 0; i < decoded_height; i++) {
2331  for (j = 0; j < channels; j++)
2332  tmpbuf[j] = dst[j];
2333 
2334  for (j = channels; j < ssize; j++)
2335  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2336 
2337  for (j = 0; j < group_size; j++) {
2338  for (int k = 0; k < bpc; k++) {
2339  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2340  }
2341  }
2342  dst += stride;
2343  }
2344  } else {
2345  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2346  }
2347  av_free(tmpbuf);
2348  }
2349 
2350  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2351  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2352  dst = p->data[plane];
2353  for (i = 0; i < s->height; i++) {
2354  for (j = 0; j < stride; j++)
2355  dst[j] = c - dst[j];
2356  dst += stride;
2357  }
2358  }
2359 
2360  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2361  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2362  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2363  uint8_t *src = five_planes ? five_planes : p->data[plane];
2364  dst = p->data[plane];
2365  for (i = 0; i < s->height; i++) {
2366  for (j = 0; j < s->width; j++) {
2367  int k = 255 - src[x * j + 3];
2368  int r = (255 - src[x * j ]) * k;
2369  int g = (255 - src[x * j + 1]) * k;
2370  int b = (255 - src[x * j + 2]) * k;
2371  dst[4 * j ] = r * 257 >> 16;
2372  dst[4 * j + 1] = g * 257 >> 16;
2373  dst[4 * j + 2] = b * 257 >> 16;
2374  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2375  }
2376  src += stride;
2377  dst += p->linesize[plane];
2378  }
2379  av_freep(&five_planes);
2380  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2381  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2382  dst = p->data[plane];
2383  for (i = 0; i < s->height; i++) {
2384  for (j = 0; j < s->width; j++) {
2385  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2386  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2387  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2388  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2389  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2390  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2391  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2392  AV_WB16(dst + 8 * j + 6, 65535);
2393  }
2394  dst += p->linesize[plane];
2395  }
2396  }
2397  }
2398 
2399  if (s->planar && s->bppcount > 2) {
2400  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2401  FFSWAP(int, p->linesize[0], p->linesize[2]);
2402  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2403  FFSWAP(int, p->linesize[0], p->linesize[1]);
2404  }
2405 
2406  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2407  uint16_t *dst = (uint16_t *)p->data[0];
2408  for (i = 0; i < s->height; i++) {
2409  for (j = 0; j < s->width; j++)
2410  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2411  dst += stride / 2;
2412  }
2413  }
2414 
2415  ret = ff_decode_exif_attach_ifd(avctx, p, &s->exif_meta);
2416  if (ret < 0)
2417  av_log(avctx, AV_LOG_ERROR, "error attaching EXIF ifd: %s\n", av_err2str(ret));
2418 
2419  *got_frame = 1;
2420 
2421  return avpkt->size;
2422 }
2423 
2425 {
2426  TiffContext *s = avctx->priv_data;
2427  int ret;
2428 
2429  s->width = 0;
2430  s->height = 0;
2431  s->subsampling[0] =
2432  s->subsampling[1] = 1;
2433  s->avctx = avctx;
2434  ff_lzw_decode_open(&s->lzw);
2435  if (!s->lzw)
2436  return AVERROR(ENOMEM);
2438 
2439  /* Allocate JPEG frame */
2440  s->jpgframe = av_frame_alloc();
2441  s->jpkt = av_packet_alloc();
2442  if (!s->jpgframe || !s->jpkt)
2443  return AVERROR(ENOMEM);
2444 
2445  /* Prepare everything needed for JPEG decoding */
2447  s->avctx_mjpeg = avcodec_alloc_context3(&ff_mjpeg_decoder.p);
2448  if (!s->avctx_mjpeg)
2449  return AVERROR(ENOMEM);
2450  s->avctx_mjpeg->flags = avctx->flags;
2451  s->avctx_mjpeg->flags2 = avctx->flags2;
2452  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2453  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2454  ret = avcodec_open2(s->avctx_mjpeg, NULL, NULL);
2455  if (ret < 0) {
2456  return ret;
2457  }
2458 
2459  return 0;
2460 }
2461 
2462 static av_cold int tiff_end(AVCodecContext *avctx)
2463 {
2464  TiffContext *const s = avctx->priv_data;
2465 
2466  free_geotags(s);
2467  av_exif_free(&s->exif_meta);
2468 
2469  ff_lzw_decode_close(&s->lzw);
2470  av_freep(&s->deinvert_buf);
2471  s->deinvert_buf_size = 0;
2472  av_freep(&s->yuv_line);
2473  s->yuv_line_size = 0;
2474  av_frame_free(&s->jpgframe);
2475  av_packet_free(&s->jpkt);
2476  avcodec_free_context(&s->avctx_mjpeg);
2477  return 0;
2478 }
2479 
2480 #define OFFSET(x) offsetof(TiffContext, x)
2481 static const AVOption tiff_options[] = {
2482  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2483  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2484  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2485  { NULL },
2486 };
2487 
2488 static const AVClass tiff_decoder_class = {
2489  .class_name = "TIFF decoder",
2490  .item_name = av_default_item_name,
2491  .option = tiff_options,
2492  .version = LIBAVUTIL_VERSION_INT,
2493 };
2494 
2496  .p.name = "tiff",
2497  CODEC_LONG_NAME("TIFF image"),
2498  .p.type = AVMEDIA_TYPE_VIDEO,
2499  .p.id = AV_CODEC_ID_TIFF,
2500  .priv_data_size = sizeof(TiffContext),
2501  .init = tiff_init,
2502  .close = tiff_end,
2504  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2507  .p.priv_class = &tiff_decoder_class,
2508 };
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:74
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:142
TiffContext::gb
GetByteContext gb
Definition: tiff.c:63
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:148
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2495
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:113
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:162
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1254
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:882
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:108
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:565
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:199
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:132
TiffContext::exif_meta
AVExifMetadata exif_meta
Definition: tiff.c:129
GetByteContext
Definition: bytestream.h:33
AVExifMetadata
Definition: exif.h:76
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:104
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1903
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:675
TiffContext::strippos
int strippos
Definition: tiff.c:111
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:88
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:161
OFFSET
#define OFFSET(x)
Definition: tiff.c:2480
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1410
TiffContext::sot
int sot
Definition: tiff.c:110
int64_t
long long int64_t
Definition: coverity.c:34
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:249
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1536
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:128
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:156
av_unused
#define av_unused
Definition: attributes.h:164
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2462
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:116
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:134
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:90
AVPacket::data
uint8_t * data
Definition: packet.h:595
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:200
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:43
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:133
FFCodec
Definition: codec_internal.h:127
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:445
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:59
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:93
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:152
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:120
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:160
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:162
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:127
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:96
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:91
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:574
TiffGeoTagKeyName
Definition: tiff.h:221
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:191
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:135
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:147
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:119
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:289
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:119
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2481
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:72
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:204
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:51
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:195
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:122
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:104
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AV_TIFF_SHORT
@ AV_TIFF_SHORT
Definition: exif.h:45
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:73
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:81
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:393
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:472
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:134
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:972
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:85
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:54
GetBitContext
Definition: get_bits.h:109
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:146
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:121
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:659
TiffContext::tile_length
int tile_length
Definition: tiff.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:69
val
static double val(void *priv, double ch)
Definition: aeval.c:77
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:389
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:98
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:182
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:72
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:96
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:671
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:127
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:104
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
get_geokey_val
static const char * get_geokey_val(int key, uint16_t val)
Definition: tiff.c:190
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
TiffGeoTag
Definition: tiff.h:213
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:67
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:119
TiffContext::rps
int rps
Definition: tiff.c:109
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:650
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:100
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:99
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:120
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:122
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
TiffContext::stripsize
int stripsize
Definition: tiff.c:111
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
attributes_internal.h
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:559
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:130
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
TiffContext::width
int width
Definition: tiff.c:75
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:285
g
const char * g
Definition: vf_curves.c:128
AV_TIFF_RATIONAL
@ AV_TIFF_RATIONAL
Definition: exif.h:47
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:34
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1044
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:60
fminf
float fminf(float, float)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Alias for avcodec_receive_frame_flags(avctx, frame, 0).
Definition: avcodec.c:723
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:123
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:144
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:653
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:202
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:92
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:121
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:71
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:53
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1794
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:91
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
key
const char * key
Definition: hwcontext_opencl.c:189
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:68
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:80
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:81
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
EXTERN
#define EXTERN
Definition: attributes_internal.h:34
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:181
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:286
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:571
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:308
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:119
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:126
TiffContext::height
int height
Definition: tiff.c:75
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:64
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:185
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:131
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2424
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:97
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2478
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:45
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:197
TiffContext
Definition: tiff.c:60
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:88
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:62
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:194
tiff_common.h
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:70
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:112
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:91
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:198
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:101
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:573
MJpegDecodeContext
Definition: mjpegdec.h:56
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:77
RET_GEOKEY_TYPE
#define RET_GEOKEY_TYPE(TYPE, array)
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:74
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:118
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:58
TIFF_G3
@ TIFF_G3
Definition: tiff.h:129
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:46
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:80
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:77
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:187
AV_TIFF_BYTE
@ AV_TIFF_BYTE
Definition: exif.h:43
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:503
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:551
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:596
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:40
height
#define height
Definition: dsp.h:89
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:95
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:109
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
bps
unsigned bps
Definition: movenc.c:2047
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:97
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1897
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2172
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:55
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:76
AVFrameSideData::data
uint8_t * data
Definition: frame.h:292
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:141
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:67
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:52
TiffContext::fill_order
int fill_order
Definition: tiff.c:86
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:93
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:79
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:55
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
TiffContext::white_level
unsigned white_level
Definition: tiff.c:103
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:111
planes
static const struct @585 planes[]
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
AV_TIFF_STRING
@ AV_TIFF_STRING
Definition: exif.h:44
TiffContext::planar
int planar
Definition: tiff.c:82
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:49
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:47
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:176
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2488
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:105
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:68
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:203
TiffContext::le
int le
Definition: tiff.c:79
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:121
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:721
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:83
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:71
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1929
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:201
AV_TIFF_SRATIONAL
@ AV_TIFF_SRATIONAL
Definition: exif.h:52
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:51
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:192
TiffContext::tile_width
int tile_width
Definition: tiff.c:117
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:84
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:76
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:458
TiffContext::res
uint32_t res[4]
Definition: tiff.c:87
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:92
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:63
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:287
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:36
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
av_always_inline
#define av_always_inline
Definition: attributes.h:76
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:77
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:358
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1538
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:38
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:102
TiffContext::stripoff
int stripoff
Definition: tiff.c:111
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:190
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:89
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:56
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:78
avcodec.h
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
tag
uint32_t tag
Definition: movenc.c:2046
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:75
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:106
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:78
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:48
d65_white
static const float d65_white[3]
Definition: tiff.c:132
pos
unsigned int pos
Definition: spdifenc.c:414
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:147
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:50
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:57
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:189
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:89
AVCodecContext
main external API structure.
Definition: avcodec.h:439
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:423
TiffContext::sstype
int sstype
Definition: tiff.c:109
again
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining again
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:76
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:112
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1235
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:137
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:632
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:95
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:122
AV_TIFF_DOUBLE
@ AV_TIFF_DOUBLE
Definition: exif.h:54
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:115
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:162
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:421
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
RET_GEOKEY_STR
#define RET_GEOKEY_STR(TYPE, array)
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:62
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:94
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:110
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
desc
const char * desc
Definition: libsvtav1.c:83
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:424
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
av_strdup
#define av_strdup(s)
Definition: ops_asmgen.c:47
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1045
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:290
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:139
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:136
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:193
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:572
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:82
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:316
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:743
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:107
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:116
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:100
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:66
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:61
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:275
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:126
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:150
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:107
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
stride
#define stride
Definition: h264pred_template.c:536
avstring.h
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:37
width
#define width
Definition: dsp.h:89
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:520
TiffContext::predictor
int predictor
Definition: tiff.c:85
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:572
AV_TIFF_LONG
@ AV_TIFF_LONG
Definition: exif.h:46
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a byte from the bytestream using given endianness.
Definition: tiff_common.c:64
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:196
TiffContext::strips
int strips
Definition: tiff.c:109
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:158
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:119
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:106
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:288
src
#define src
Definition: vp8dsp.c:248
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:124
AV_RB16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:111
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:142