FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/avstring.h"
40 #include "libavutil/error.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/opt.h"
43 #include "libavutil/reverse.h"
44 #include "avcodec.h"
45 #include "bytestream.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 #include "faxcompr.h"
49 #include "lzw.h"
50 #include "tiff.h"
51 #include "tiff_common.h"
52 #include "tiff_data.h"
53 #include "mjpegdec.h"
54 #include "thread.h"
55 #include "get_bits.h"
56 
57 typedef struct TiffContext {
58  AVClass *class;
59  AVCodecContext *avctx;
60  GetByteContext gb;
61 
62  /* JPEG decoding for DNG */
63  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
64  AVPacket *jpkt; // encoded JPEG tile
65  AVFrame *jpgframe; // decoded JPEG tile
66 
67  int get_subimage;
68  uint16_t get_page;
69  int get_thumbnail;
70 
71  enum TiffType tiff_type;
72  int width, height;
73  unsigned int bpp, bppcount;
74  uint32_t palette[256];
75  int palette_is_set;
76  int le;
77  enum TiffCompr compr;
78  enum TiffPhotometric photometric;
79  int planar;
80  int subsampling[2];
81  int fax_opts;
82  int predictor;
83  int fill_order;
84  uint32_t res[4];
85  int is_thumbnail;
86  unsigned last_tag;
87 
88  int is_bayer;
89  int use_color_matrix;
90  uint8_t pattern[4];
91 
92  float analog_balance[4];
93  float as_shot_neutral[4];
94  float as_shot_white[4];
95  float color_matrix[3][4];
96  float camera_calibration[4][4];
97  float premultiply[4];
98  float black_level[4];
99 
100  unsigned white_level;
101  uint16_t dng_lut[65536];
102 
103  uint32_t sub_ifd;
104  uint16_t cur_page;
105 
105 
106  int strips, rps, sstype;
107  int sot;
108  int stripsizesoff, stripsize, stripoff, strippos;
109  LZWState *lzw;
110 
111  /* Tile support */
112  int is_tiled;
113  int tile_byte_counts_offset, tile_offsets_offset;
114  int tile_width, tile_length;
115 
116  int is_jpeg;
117 
118  uint8_t *deinvert_buf;
119  int deinvert_buf_size;
120  uint8_t *yuv_line;
121  unsigned int yuv_line_size;
122 
123  int geotag_count;
124  TiffGeoTag *geotags;
125 } TiffContext;
126 
127 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
128 
129 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
130  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
131  s->tiff_type = tiff_type;
132 }
133 
134 static void free_geotags(TiffContext *const s)
135 {
136  for (int i = 0; i < s->geotag_count; i++)
137  av_freep(&s->geotags[i].val);
138  av_freep(&s->geotags);
139  s->geotag_count = 0;
140 }
141 
142 static const char *get_geokey_name(int key)
143 {
144 #define RET_GEOKEY_STR(TYPE, array)\
145  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
146  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
147  return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;
148 
149  RET_GEOKEY_STR(VERT, vert);
150  RET_GEOKEY_STR(PROJ, proj);
151  RET_GEOKEY_STR(GEOG, geog);
152  RET_GEOKEY_STR(CONF, conf);
153 
154  return NULL;
155 }
156 
157 static int get_geokey_type(int key)
158 {
159 #define RET_GEOKEY_TYPE(TYPE, array)\
160  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
161  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
162  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
163  RET_GEOKEY_TYPE(VERT, vert);
164  RET_GEOKEY_TYPE(PROJ, proj);
165  RET_GEOKEY_TYPE(GEOG, geog);
166  RET_GEOKEY_TYPE(CONF, conf);
167 
168  return AVERROR_INVALIDDATA;
169 }
170 
171 static int cmp_id_key(const void *id, const void *k)
172 {
173  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
174 }
175 
176 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
177 {
178  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
179  if(r)
180  return r->name;
181 
182  return NULL;
183 }
184 
185 static const char *get_geokey_val(int key, uint16_t val)
186 {
187  if (val == TIFF_GEO_KEY_UNDEFINED)
188  return "undefined";
189  if (val == TIFF_GEO_KEY_USER_DEFINED)
190  return "User-Defined";
191 
192 #define RET_GEOKEY_VAL(TYPE, array)\
193  if (val >= TIFF_##TYPE##_OFFSET &&\
194  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
195  return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];
196 
197  switch (key) {
199  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
200  break;
202  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
203  break;
207  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
208  break;
211  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
212  break;
214  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
215  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
216  break;
218  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
219  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
220  break;
222  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
223  break;
225  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
226  break;
232  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
233  break;
235  RET_GEOKEY_VAL(VERT_CS, vert_cs);
236  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
237  break;
238 
239  }
240 
241  return NULL;
242 }
243 
244 static char *doubles2str(double *dp, int count, const char *sep)
245 {
246  int i;
247  char *ap, *ap0;
248  uint64_t component_len;
249  if (!sep) sep = ", ";
250  component_len = 24LL + strlen(sep);
251  if (count >= (INT_MAX - 1)/component_len)
252  return NULL;
253  ap = av_malloc(component_len * count + 1);
254  if (!ap)
255  return NULL;
256  ap0 = ap;
257  ap[0] = '\0';
258  for (i = 0; i < count; i++) {
259  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
260  if(l >= component_len) {
261  av_free(ap0);
262  return NULL;
263  }
264  ap += l;
265  }
266  ap0[strlen(ap0) - strlen(sep)] = '\0';
267  return ap0;
268 }
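/* Worked example (illustrative only): doubles2str() reserves 24 bytes per
 * "%.15g" component plus the separator and returns a heap string the caller
 * must free, e.g. a hypothetical call:
 *
 *     double v[3] = { 1.0, 2.5, 0.25 };
 *     char *str = doubles2str(v, 3, ", ");   // "1, 2.5, 0.25"
 *     av_free(str);
 */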
269 
270 static int add_metadata(int count, int type,
271  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
272 {
273  switch(type) {
274  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
275  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
276  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
277  default : return AVERROR_INVALIDDATA;
278  };
279 }
280 
281 /**
282  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
283  */
284 static uint16_t av_always_inline dng_process_color16(uint16_t value,
285  const uint16_t *lut,
286  float black_level,
287  float scale_factor)
288 {
289  float value_norm;
290 
291  // Lookup table lookup
292  value = lut[value];
293 
294  // Black level subtraction
295  // Color scaling
296  value_norm = ((float)value - black_level) * scale_factor;
297 
298  value = av_clip_uint16(lrintf(value_norm));
299 
300  return value;
301 }
302 
303 static uint16_t av_always_inline dng_process_color8(uint16_t value,
304  const uint16_t *lut,
305  float black_level,
306  float scale_factor)
307 {
308  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
309 }
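/* Worked example (illustrative only): the two helpers above implement the DNG
 * linearization step out = clip16(lrintf((lut[raw] - black) * scale)). With
 * the identity LUT the decoder installs by default, a black level of 256 and
 * a white level of 4095:
 *
 *     float scale = 65535.f / (4095 - 256);                        // ~17.07
 *     uint16_t out = dng_process_color16(2175, lut, 256.f, scale);
 *     // (2175 - 256) * 17.07 ~= 32759, roughly mid-range after rescaling
 */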
310 
311 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
312  const uint8_t *src, int src_stride, int width, int height,
313  int is_single_comp, int is_u16, int odd_line)
314 {
315  float scale_factor[4];
316  int line, col;
317 
318  if (s->is_bayer) {
319  for (int i = 0; i < 4; i++)
320  scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
321  } else {
322  for (int i = 0; i < 4; i++)
323  scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
324  }
325 
326  if (is_single_comp) {
327  if (!is_u16)
328  return; /* <= 8bpp unsupported */
329 
330  /* Image is double the width and half the height we need, each row comprises 2 rows of the output
331  (split vertically in the middle). */
332  for (line = 0; line < height / 2; line++) {
333  uint16_t *dst_u16 = (uint16_t *)dst;
334  const uint16_t *src_u16 = (const uint16_t *)src;
335 
336  /* Blit first half of input row to initial row of output */
337  for (col = 0; col < width; col++)
338  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
339 
340  /* Advance the destination pointer by a row (source pointer remains in the same place) */
341  dst += dst_stride * sizeof(uint16_t);
342  dst_u16 = (uint16_t *)dst;
343 
344  /* Blit second half of input row to next row of output */
345  for (col = 0; col < width; col++)
346  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
347 
348  dst += dst_stride * sizeof(uint16_t);
349  src += src_stride * sizeof(uint16_t);
350  }
351  } else {
352  /* Input and output image are the same size and the MJpeg decoder has done per-component
353  deinterleaving, so blitting here is straightforward. */
354  if (is_u16) {
355  for (line = 0; line < height; line++) {
356  uint16_t *dst_u16 = (uint16_t *)dst;
357  const uint16_t *src_u16 = (const uint16_t *)src;
358 
359  for (col = 0; col < width; col++)
360  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
361  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
362  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
363 
364  dst += dst_stride * sizeof(uint16_t);
365  src += src_stride * sizeof(uint16_t);
366  }
367  } else {
368  for (line = 0; line < height; line++) {
369  uint8_t *dst_u8 = dst;
370  const uint8_t *src_u8 = src;
371 
372  for (col = 0; col < width; col++)
373  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
374  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
375  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
376 
377  dst += dst_stride;
378  src += src_stride;
379  }
380  }
381  }
382 }
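/* Layout note (illustrative only): in the is_single_comp path the MJPEG
 * decoder returns a GRAY16 picture twice as wide and half as tall as the
 * tile, so each source row carries two destination rows:
 *
 *     src row r, cols [0, width)         -> dst row 2*r
 *     src row r, cols [width, 2*width)   -> dst row 2*r + 1
 *
 * The black level / scale index alternates with col&1 (plus 2 for the second
 * half), matching a 2x2 Bayer pattern.
 */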
383 
384 static void av_always_inline horizontal_fill(TiffContext *s,
385  unsigned int bpp, uint8_t* dst,
386  int usePtr, const uint8_t *src,
387  uint8_t c, int width, int offset)
388 {
389  switch (bpp) {
390  case 1:
391  while (--width >= 0) {
392  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
393  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
394  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
395  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
396  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
397  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
398  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
399  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
400  }
401  break;
402  case 2:
403  while (--width >= 0) {
404  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
405  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
406  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
407  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
408  }
409  break;
410  case 4:
411  while (--width >= 0) {
412  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
413  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
414  }
415  break;
416  case 10:
417  case 12:
418  case 14: {
419  uint16_t *dst16 = (uint16_t *)dst;
420  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
421  uint8_t shift = is_dng ? 0 : 16 - bpp;
422  GetBitContext gb;
423 
424  init_get_bits8(&gb, src, width);
425  for (int i = 0; i < s->width; i++) {
426  dst16[i] = get_bits(&gb, bpp) << shift;
427  }
428  }
429  break;
430  default:
431  if (usePtr) {
432  memcpy(dst + offset, src, width);
433  } else {
434  memset(dst + offset, c, width);
435  }
436  }
437 }
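/* Worked example (illustrative only): horizontal_fill() expands packed
 * sub-byte samples to one sample per output byte; for bpp = 4 one source byte
 * becomes two palette indices:
 *
 *     uint8_t src[1] = { 0xAB };
 *     uint8_t dst[2];
 *     horizontal_fill(s, 4, dst, 1, src, 0, 1, 0);   // dst = { 0x0A, 0x0B }
 *
 * With usePtr = 0 the constant c is expanded instead, which is how the
 * PackBits run path below reuses the same helper.
 */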
438 
439 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
440 {
441  int i;
442 
443  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
444  if (!s->deinvert_buf)
445  return AVERROR(ENOMEM);
446  for (i = 0; i < size; i++)
447  s->deinvert_buf[i] = ff_reverse[src[i]];
448 
449  return 0;
450 }
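/* Worked example (illustrative only): deinvert_buffer() handles FillOrder = 2
 * (lsb-first) data by reversing the bits of every byte through the ff_reverse
 * table, e.g.
 *
 *     ff_reverse[0x01] == 0x80    // 00000001 -> 10000000
 *     ff_reverse[0xB4] == 0x2D    // 10110100 -> 00101101
 */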
451 
452 static void unpack_gray(TiffContext *s, AVFrame *p,
453  const uint8_t *src, int lnum, int width, int bpp)
454 {
455  GetBitContext gb;
456  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
457 
458  init_get_bits8(&gb, src, width);
459 
460  for (int i = 0; i < s->width; i++) {
461  dst[i] = get_bits(&gb, bpp);
462  }
463 }
464 
465 static void unpack_yuv(TiffContext *s, AVFrame *p,
466  const uint8_t *src, int lnum)
467 {
468  int i, j, k;
469  int w = (s->width - 1) / s->subsampling[0] + 1;
470  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
471  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
472  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
473  for (i = 0; i < w; i++) {
474  for (j = 0; j < s->subsampling[1]; j++)
475  for (k = 0; k < s->subsampling[0]; k++)
476  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
477  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
478  *pu++ = *src++;
479  *pv++ = *src++;
480  }
481  }else{
482  for (i = 0; i < w; i++) {
483  for (j = 0; j < s->subsampling[1]; j++)
484  for (k = 0; k < s->subsampling[0]; k++)
485  p->data[0][(lnum + j) * p->linesize[0] +
486  i * s->subsampling[0] + k] = *src++;
487  *pu++ = *src++;
488  *pv++ = *src++;
489  }
490  }
491 }
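/* Layout note (illustrative only): each chroma-subsampled block in the strip
 * stores sx*sy luma samples followed by one U and one V sample, so a strip
 * row of image width w occupies
 *
 *     w_chroma = (w - 1) / sx + 1;
 *     bytes    = w_chroma * (sx * sy + 2);
 *
 * e.g. w = 640 with 4:2:0 subsampling (sx = sy = 2) gives 320 * 6 = 1920
 * bytes covering two picture rows, which matches the "width" recomputed for
 * the YUV path in tiff_unpack_strip() below (the yuv_line allocation itself
 * is a looser upper bound).
 */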
492 
493 #if CONFIG_ZLIB
494 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
495  int size)
496 {
497  z_stream zstream = { 0 };
498  int zret;
499 
500  zstream.next_in = src;
501  zstream.avail_in = size;
502  zstream.next_out = dst;
503  zstream.avail_out = *len;
504  zret = inflateInit(&zstream);
505  if (zret != Z_OK) {
506  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
507  return zret;
508  }
509  zret = inflate(&zstream, Z_SYNC_FLUSH);
510  inflateEnd(&zstream);
511  *len = zstream.total_out;
512  return zret == Z_STREAM_END ? Z_OK : zret;
513 }
514 
515 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
516  const uint8_t *src, int size, int width, int lines,
517  int strip_start, int is_yuv)
518 {
519  uint8_t *zbuf;
520  unsigned long outlen;
521  int ret, line;
522  outlen = width * lines;
523  zbuf = av_malloc(outlen);
524  if (!zbuf)
525  return AVERROR(ENOMEM);
526  if (s->fill_order) {
527  if ((ret = deinvert_buffer(s, src, size)) < 0) {
528  av_free(zbuf);
529  return ret;
530  }
531  src = s->deinvert_buf;
532  }
533  ret = tiff_uncompress(zbuf, &outlen, src, size);
534  if (ret != Z_OK) {
535  av_log(s->avctx, AV_LOG_ERROR,
536  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
537  (unsigned long)width * lines, ret);
538  av_free(zbuf);
539  return AVERROR_UNKNOWN;
540  }
541  src = zbuf;
542  for (line = 0; line < lines; line++) {
543  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
544  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
545  } else {
546  memcpy(dst, src, width);
547  }
548  if (is_yuv) {
549  unpack_yuv(s, p, dst, strip_start + line);
550  line += s->subsampling[1] - 1;
551  }
552  dst += stride;
553  src += width;
554  }
555  av_free(zbuf);
556  return 0;
557 }
558 #endif
559 
560 #if CONFIG_LZMA
561 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
562  int size)
563 {
564  lzma_stream stream = LZMA_STREAM_INIT;
565  lzma_ret ret;
566 
567  stream.next_in = src;
568  stream.avail_in = size;
569  stream.next_out = dst;
570  stream.avail_out = *len;
571  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
572  if (ret != LZMA_OK) {
573  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
574  return ret;
575  }
576  ret = lzma_code(&stream, LZMA_RUN);
577  lzma_end(&stream);
578  *len = stream.total_out;
579  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
580 }
581 
582 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
583  const uint8_t *src, int size, int width, int lines,
584  int strip_start, int is_yuv)
585 {
586  uint64_t outlen = width * (uint64_t)lines;
587  int ret, line;
588  uint8_t *buf = av_malloc(outlen);
589  if (!buf)
590  return AVERROR(ENOMEM);
591  if (s->fill_order) {
592  if ((ret = deinvert_buffer(s, src, size)) < 0) {
593  av_free(buf);
594  return ret;
595  }
596  src = s->deinvert_buf;
597  }
598  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
599  if (ret != LZMA_OK) {
600  av_log(s->avctx, AV_LOG_ERROR,
601  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
602  (uint64_t)width * lines, ret);
603  av_free(buf);
604  return AVERROR_UNKNOWN;
605  }
606  src = buf;
607  for (line = 0; line < lines; line++) {
608  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
609  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
610  } else {
611  memcpy(dst, src, width);
612  }
613  if (is_yuv) {
614  unpack_yuv(s, p, dst, strip_start + line);
615  line += s->subsampling[1] - 1;
616  }
617  dst += stride;
618  src += width;
619  }
620  av_free(buf);
621  return 0;
622 }
623 #endif
624 
625 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
626  const uint8_t *src, int size, int width, int lines)
627 {
628  int line;
629  int ret;
630 
631  if (s->fill_order) {
632  if ((ret = deinvert_buffer(s, src, size)) < 0)
633  return ret;
634  src = s->deinvert_buf;
635  }
636  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
637  s->compr, s->fax_opts);
638  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
639  for (line = 0; line < lines; line++) {
640  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
641  dst += stride;
642  }
643  return ret;
644 }
645 
646 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
647  int tile_byte_count, int dst_x, int dst_y, int w, int h)
648 {
649  TiffContext *s = avctx->priv_data;
650  uint8_t *dst_data, *src_data;
651  uint32_t dst_offset; /* offset from dst buffer in pixels */
652  int is_single_comp, is_u16, pixel_size;
653  int ret;
654 
655  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
656  return AVERROR_INVALIDDATA;
657 
658  /* Prepare a packet and send to the MJPEG decoder */
659  av_packet_unref(s->jpkt);
660  s->jpkt->data = (uint8_t*)s->gb.buffer;
661  s->jpkt->size = tile_byte_count;
662 
663  if (s->is_bayer) {
664  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
665  /* We have to set this information here; there is no way to know if a given JPEG is a DNG-embedded
666  image or not from its own data (and we need that information when decoding it). */
667  mjpegdecctx->bayer = 1;
668  }
669 
670  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
671  if (ret < 0) {
672  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
673  return ret;
674  }
675 
676  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
677  if (ret < 0) {
678  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
679 
680  /* Normally skip, error if explode */
681  if (avctx->err_recognition & AV_EF_EXPLODE)
682  return AVERROR_INVALIDDATA;
683  else
684  return 0;
685  }
686 
687  is_u16 = (s->bpp > 8);
688 
689  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
690 
691  if (s->jpgframe->width != s->avctx_mjpeg->width ||
692  s->jpgframe->height != s->avctx_mjpeg->height ||
693  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
694  return AVERROR_INVALIDDATA;
695 
696  /* See dng_blit for explanation */
697  if (s->avctx_mjpeg->width == w * 2 &&
698  s->avctx_mjpeg->height == h / 2 &&
699  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
700  is_single_comp = 1;
701  } else if (s->avctx_mjpeg->width >= w &&
702  s->avctx_mjpeg->height >= h &&
703  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
704  ) {
705  is_single_comp = 0;
706  } else
707  return AVERROR_INVALIDDATA;
708 
709  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
710 
711  if (is_single_comp && !is_u16) {
712  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
713  av_frame_unref(s->jpgframe);
714  return AVERROR_PATCHWELCOME;
715  }
716 
717  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
718  dst_data = frame->data[0] + dst_offset * pixel_size;
719  src_data = s->jpgframe->data[0];
720 
721  dng_blit(s,
722  dst_data,
723  frame->linesize[0] / pixel_size,
724  src_data,
725  s->jpgframe->linesize[0] / pixel_size,
726  w,
727  h,
728  is_single_comp,
729  is_u16, 0);
730 
731  av_frame_unref(s->jpgframe);
732 
733  return 0;
734 }
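/* Usage note (illustrative only): dng_decode_jpeg() accepts exactly two MJPEG
 * output geometries for a w x h tile:
 *
 *     2*w x h/2,  GRAY16LE        -> is_single_comp = 1 (single-component lossless JPEG)
 *     >=w x >=h,  GRAY8/GRAY16    -> is_single_comp = 0 (already deinterleaved)
 *
 * A caller, assuming avctx_mjpeg/jpkt/jpgframe were allocated by the decoder
 * init code (not shown in this excerpt), only has to seek s->gb to the tile
 * data and pass the byte count, which is what dng_decode_tiles() below does
 * for every tile.
 */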
735 
736 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
737  const uint8_t *src, int size, int strip_start, int lines)
738 {
739  PutByteContext pb;
740  int c, line, pixels, code, ret;
741  const uint8_t *ssrc = src;
742  int width = ((s->width * s->bpp) + 7) >> 3;
743  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
744  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
745  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
746  desc->nb_components >= 3;
747  int is_dng;
748 
749  if (s->planar)
750  width /= s->bppcount;
751 
752  if (size <= 0)
753  return AVERROR_INVALIDDATA;
754 
755  if (is_yuv) {
756  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
757  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
758  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
759  if (s->yuv_line == NULL) {
760  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
761  return AVERROR(ENOMEM);
762  }
763  dst = s->yuv_line;
764  stride = 0;
765 
766  width = (s->width - 1) / s->subsampling[0] + 1;
767  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
768  av_assert0(width <= bytes_per_row);
769  av_assert0(s->bpp == 24);
770  }
771  if (s->is_bayer) {
772  av_assert0(width == (s->bpp * s->width + 7) >> 3);
773  }
774  av_assert0(!(s->is_bayer && is_yuv));
775  if (p->format == AV_PIX_FMT_GRAY12) {
776  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
777  if (s->yuv_line == NULL) {
778  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
779  return AVERROR(ENOMEM);
780  }
781  dst = s->yuv_line;
782  stride = 0;
783  }
784 
785  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
786 #if CONFIG_ZLIB
787  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
788  strip_start, is_yuv);
789 #else
790  av_log(s->avctx, AV_LOG_ERROR,
791  "zlib support not enabled, "
792  "deflate compression not supported\n");
793  return AVERROR(ENOSYS);
794 #endif
795  }
796  if (s->compr == TIFF_LZMA) {
797 #if CONFIG_LZMA
798  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
799  strip_start, is_yuv);
800 #else
801  av_log(s->avctx, AV_LOG_ERROR,
802  "LZMA support not enabled\n");
803  return AVERROR(ENOSYS);
804 #endif
805  }
806  if (s->compr == TIFF_LZW) {
807  if (s->fill_order) {
808  if ((ret = deinvert_buffer(s, src, size)) < 0)
809  return ret;
810  ssrc = src = s->deinvert_buf;
811  }
812  if (size > 1 && !src[0] && (src[1]&1)) {
813  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
814  }
815  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
816  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
817  return ret;
818  }
819  for (line = 0; line < lines; line++) {
820  pixels = ff_lzw_decode(s->lzw, dst, width);
821  if (pixels < width) {
822  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
823  pixels, width);
824  return AVERROR_INVALIDDATA;
825  }
826  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
827  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
828  if (is_yuv) {
829  unpack_yuv(s, p, dst, strip_start + line);
830  line += s->subsampling[1] - 1;
831  } else if (p->format == AV_PIX_FMT_GRAY12) {
832  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
833  }
834  dst += stride;
835  }
836  return 0;
837  }
838  if (s->compr == TIFF_CCITT_RLE ||
839  s->compr == TIFF_G3 ||
840  s->compr == TIFF_G4) {
841  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
842  return AVERROR_INVALIDDATA;
843 
844  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
845  }
846 
847  bytestream2_init(&s->gb, src, size);
848  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
849 
850  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
851 
852  /* Decode JPEG-encoded DNGs with strips */
853  if (s->compr == TIFF_NEWJPEG && is_dng) {
854  if (s->strips > 1) {
855  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
856  return AVERROR_PATCHWELCOME;
857  }
858  if (!s->is_bayer)
859  return AVERROR_PATCHWELCOME;
860  if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
861  return ret;
862  return 0;
863  }
864 
865  if (is_dng && stride == 0)
866  return AVERROR_INVALIDDATA;
867 
868  for (line = 0; line < lines; line++) {
869  if (src - ssrc > size) {
870  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
871  return AVERROR_INVALIDDATA;
872  }
873 
874  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
875  break;
876  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
877  switch (s->compr) {
878  case TIFF_RAW:
879  if (ssrc + size - src < width)
880  return AVERROR_INVALIDDATA;
881 
882  if (!s->fill_order) {
883  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
884  dst, 1, src, 0, width, 0);
885  } else {
886  int i;
887  for (i = 0; i < width; i++)
888  dst[i] = ff_reverse[src[i]];
889  }
890 
891  /* Color processing for DNG images with uncompressed strips (non-tiled) */
892  if (is_dng) {
893  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
894 
895  is_u16 = (s->bpp / s->bppcount > 8);
896  pixel_size_bits = (is_u16 ? 16 : 8);
897  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
898 
899  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
900  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
901  dng_blit(s,
902  dst,
903  0, // no stride, only 1 line
904  dst,
905  0, // no stride, only 1 line
906  elements,
907  1,
908  0, // single-component variation is only present in JPEG-encoded DNGs
909  is_u16,
910  (line + strip_start)&1);
911  }
912 
913  src += width;
914  break;
915  case TIFF_PACKBITS:
916  for (pixels = 0; pixels < width;) {
917  if (ssrc + size - src < 2) {
918  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
919  return AVERROR_INVALIDDATA;
920  }
921  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
922  if (code >= 0) {
923  code++;
924  if (pixels + code > width ||
925  ssrc + size - src < code) {
926  av_log(s->avctx, AV_LOG_ERROR,
927  "Copy went out of bounds\n");
928  return AVERROR_INVALIDDATA;
929  }
930  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
931  dst, 1, src, 0, code, pixels);
932  src += code;
933  pixels += code;
934  } else if (code != -128) { // -127..-1
935  code = (-code) + 1;
936  if (pixels + code > width) {
937  av_log(s->avctx, AV_LOG_ERROR,
938  "Run went out of bounds\n");
939  return AVERROR_INVALIDDATA;
940  }
941  c = *src++;
942  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
943  dst, 0, NULL, c, code, pixels);
944  pixels += code;
945  }
946  }
947  if (s->fill_order) {
948  int i;
949  for (i = 0; i < width; i++)
950  dst[i] = ff_reverse[dst[i]];
951  }
952  break;
953  }
954  if (is_yuv) {
955  unpack_yuv(s, p, dst, strip_start + line);
956  line += s->subsampling[1] - 1;
957  } else if (p->format == AV_PIX_FMT_GRAY12) {
958  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
959  }
960  dst += stride;
961  }
962  return 0;
963 }
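/* Worked example (illustrative only): the PackBits branch above follows the
 * classic TIFF rules: a control byte n in [0,127] copies n+1 literal bytes,
 * n in [-127,-1] repeats the following byte 1-n times, and -128 is a no-op.
 * So the 6-byte input
 *
 *     0x02 0x11 0x22 0x33 0xFE 0x44
 *
 * expands to 0x11 0x22 0x33 0x44 0x44 0x44.
 */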
964 
965 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
966  const AVPacket *avpkt)
967 {
968  TiffContext *s = avctx->priv_data;
969  int tile_idx;
970  int tile_offset_offset, tile_offset;
971  int tile_byte_count_offset, tile_byte_count;
972  int tile_count_x, tile_count_y;
973  int tile_width, tile_length;
974  int has_width_leftover, has_height_leftover;
975  int tile_x = 0, tile_y = 0;
976  int pos_x = 0, pos_y = 0;
977  int ret;
978 
979  if (s->tile_width <= 0 || s->tile_length <= 0)
980  return AVERROR_INVALIDDATA;
981 
982  has_width_leftover = (s->width % s->tile_width != 0);
983  has_height_leftover = (s->height % s->tile_length != 0);
984 
985  /* Calculate tile counts (round up) */
986  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
987  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
988 
989  /* Iterate over the number of tiles */
990  for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
991  tile_x = tile_idx % tile_count_x;
992  tile_y = tile_idx / tile_count_x;
993 
994  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
995  tile_width = s->width % s->tile_width;
996  else
997  tile_width = s->tile_width;
998 
999  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1000  tile_length = s->height % s->tile_length;
1001  else
1002  tile_length = s->tile_length;
1003 
1004  /* Read tile offset */
1005  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1006  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1007  tile_offset = ff_tget_long(&s->gb, s->le);
1008 
1009  /* Read tile byte size */
1010  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1011  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1012  tile_byte_count = ff_tget_long(&s->gb, s->le);
1013 
1014  /* Seek to tile data */
1015  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1016 
1017  /* Decode JPEG tile and copy it in the reference frame */
1018  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1019 
1020  if (ret < 0)
1021  return ret;
1022 
1023  /* Advance current positions */
1024  pos_x += tile_width;
1025  if (tile_x == tile_count_x - 1) { // If on the right edge
1026  pos_x = 0;
1027  pos_y += tile_length;
1028  }
1029  }
1030 
1031  /* Frame is ready to be output */
1034 
1035  return avpkt->size;
1036 }
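/* Worked example (illustrative only): tile indices map onto the grid exactly
 * as in the loop above; for a hypothetical 5184x3456 image with 512x512 tiles:
 *
 *     tile_count_x = (5184 + 511) / 512 = 11;   // last column only 64 px wide
 *     tile_count_y = (3456 + 511) / 512 = 7;    // last row only 384 px tall
 *     tile_idx = 23  ->  tile_x = 23 % 11 = 1,  tile_y = 23 / 11 = 2
 */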
1037 
1038 static int init_image(TiffContext *s, AVFrame *frame)
1039 {
1040  int ret;
1041  int create_gray_palette = 0;
1042 
1043  // make sure there is no aliasing in the following switch
1044  if (s->bpp > 128 || s->bppcount >= 10) {
1045  av_log(s->avctx, AV_LOG_ERROR,
1046  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1047  s->bpp, s->bppcount);
1048  return AVERROR_INVALIDDATA;
1049  }
1050 
1051  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1052  case 11:
1053  if (!s->palette_is_set) {
1054  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1055  break;
1056  }
1057  case 21:
1058  case 41:
1059  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1060  if (!s->palette_is_set) {
1061  create_gray_palette = 1;
1062  }
1063  break;
1064  case 81:
1065  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1066  break;
1067  case 121:
1068  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1069  break;
1070  case 100081:
1071  switch (AV_RL32(s->pattern)) {
1072  case 0x02010100:
1073  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1074  break;
1075  case 0x00010102:
1076  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1077  break;
1078  case 0x01000201:
1079  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1080  break;
1081  case 0x01020001:
1082  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1083  break;
1084  default:
1085  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1086  AV_RL32(s->pattern));
1087  return AVERROR_PATCHWELCOME;
1088  }
1089  break;
1090  case 100101:
1091  case 100121:
1092  case 100141:
1093  case 100161:
1094  switch (AV_RL32(s->pattern)) {
1095  case 0x02010100:
1096  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1097  break;
1098  case 0x00010102:
1099  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1100  break;
1101  case 0x01000201:
1102  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1103  break;
1104  case 0x01020001:
1105  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1106  break;
1107  default:
1108  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1109  AV_RL32(s->pattern));
1110  return AVERROR_PATCHWELCOME;
1111  }
1112  break;
1113  case 243:
1114  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1115  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1116  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1117  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1118  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1119  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1120  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1121  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1122  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1123  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1124  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1125  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1126  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1127  } else {
1128  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1129  return AVERROR_PATCHWELCOME;
1130  }
1131  } else
1132  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1133  break;
1134  case 161:
1135  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1136  break;
1137  case 162:
1138  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1139  break;
1140  case 322:
1141  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1142  break;
1143  case 324:
1144  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1145  break;
1146  case 405:
1147  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1148  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1149  else {
1150  av_log(s->avctx, AV_LOG_ERROR,
1151  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1152  return AVERROR_PATCHWELCOME;
1153  }
1154  break;
1155  case 483:
1156  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1157  break;
1158  case 644:
1159  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1160  break;
1161  case 10243:
1162  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1163  break;
1164  case 10324:
1165  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1166  break;
1167  case 10483:
1168  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1169  break;
1170  case 10644:
1171  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1172  break;
1173  case 963:
1174  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1175  break;
1176  case 1284:
1177  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1178  break;
1179  case 10963:
1180  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1181  break;
1182  case 11284:
1183  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1184  break;
1185  default:
1186  av_log(s->avctx, AV_LOG_ERROR,
1187  "This format is not supported (bpp=%d, bppcount=%d)\n",
1188  s->bpp, s->bppcount);
1189  return AVERROR_INVALIDDATA;
1190  }
1191 
1192  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1193  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1194  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1195  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1196  desc->nb_components < 3) {
1197  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1198  return AVERROR_INVALIDDATA;
1199  }
1200  }
1201 
1202  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1203  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1204  if (ret < 0)
1205  return ret;
1206  }
1207 
1208  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1209  return 0;
1210 
1211  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1212  return ret;
1213  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1214  if (!create_gray_palette)
1215  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1216  else {
1217  /* make default grayscale pal */
1218  int i;
1219  uint32_t *pal = (uint32_t *)frame->data[1];
1220  for (i = 0; i < 1<<s->bpp; i++)
1221  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1222  }
1223  }
1224  return 1;
1225 }
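/* Key derivation (illustrative only): the switch above dispatches on the
 * composite value
 *
 *     key = is_bayer * 100000 + planar * 10000 + bpp * 10 + bppcount
 *
 * so packed RGB24 (bpp 24, 3 components) hits case 243, planar RGB48 hits
 * case 10483, and an 8-bit Bayer mosaic hits case 100081.
 */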
1226 
1227 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1228 {
1229  int offset = tag == TIFF_YRES ? 2 : 0;
1230  s->res[offset++] = num;
1231  s->res[offset] = den;
1232  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1233  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1234  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1235  if (num > INT64_MAX || den > INT64_MAX) {
1236  num = num >> 1;
1237  den = den >> 1;
1238  }
1239  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1240  num, den, INT32_MAX);
1241  if (!s->avctx->sample_aspect_ratio.den)
1242  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1243  }
1244 }
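/* Worked example (illustrative only): res[0..1] hold XResolution (num/den)
 * and res[2..3] YResolution, and the sample aspect ratio becomes y_res / x_res:
 *
 *     SAR = (res[2] * res[1]) / (res[0] * res[3])
 *
 * e.g. XResolution = 300/1 and YResolution = 150/1 reduce to SAR = 1:2, i.e.
 * each pixel is twice as tall as it is wide.
 */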
1245 
1246 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1247 {
1248  AVFrameSideData *sd;
1249  GetByteContext gb_temp;
1250  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1251  int i, start;
1252  int pos;
1253  int ret;
1254  double *dp;
1255 
1256  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1257  if (ret < 0) {
1258  goto end;
1259  }
1260  if (tag <= s->last_tag)
1261  return AVERROR_INVALIDDATA;
1262 
1263  // TIFF_STRIP_SIZE is exempt from the tag-ordering check: some files store it out of order relative to TIFF_STRIP_OFFS
1264  if (tag != TIFF_STRIP_SIZE)
1265  s->last_tag = tag;
1266 
1267  off = bytestream2_tell(&s->gb);
1268  if (count == 1) {
1269  switch (type) {
1270  case TIFF_BYTE:
1271  case TIFF_SHORT:
1272  case TIFF_LONG:
1273  value = ff_tget(&s->gb, type, s->le);
1274  break;
1275  case TIFF_RATIONAL:
1276  value = ff_tget_long(&s->gb, s->le);
1277  value2 = ff_tget_long(&s->gb, s->le);
1278  if (!value2) {
1279  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1280  value2 = 1;
1281  }
1282 
1283  break;
1284  case TIFF_STRING:
1285  if (count <= 4) {
1286  break;
1287  }
1288  default:
1289  value = UINT_MAX;
1290  }
1291  }
1292 
1293  switch (tag) {
1294  case TIFF_SUBFILE:
1295  s->is_thumbnail = (value != 0);
1296  break;
1297  case TIFF_WIDTH:
1298  s->width = value;
1299  break;
1300  case TIFF_HEIGHT:
1301  s->height = value;
1302  break;
1303  case TIFF_BPP:
1304  if (count > 5 || count <= 0) {
1305  av_log(s->avctx, AV_LOG_ERROR,
1306  "This format is not supported (bpp=%d, %d components)\n",
1307  value, count);
1308  return AVERROR_INVALIDDATA;
1309  }
1310  s->bppcount = count;
1311  if (count == 1)
1312  s->bpp = value;
1313  else {
1314  switch (type) {
1315  case TIFF_BYTE:
1316  case TIFF_SHORT:
1317  case TIFF_LONG:
1318  s->bpp = 0;
1319  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1320  return AVERROR_INVALIDDATA;
1321  for (i = 0; i < count; i++)
1322  s->bpp += ff_tget(&s->gb, type, s->le);
1323  break;
1324  default:
1325  s->bpp = -1;
1326  }
1327  }
1328  break;
1329  case TIFF_SAMPLES_PER_PIXEL:
1330  if (count != 1) {
1331  av_log(s->avctx, AV_LOG_ERROR,
1332  "Samples per pixel requires a single value, many provided\n");
1333  return AVERROR_INVALIDDATA;
1334  }
1335  if (value > 5 || value <= 0) {
1336  av_log(s->avctx, AV_LOG_ERROR,
1337  "Invalid samples per pixel %d\n", value);
1338  return AVERROR_INVALIDDATA;
1339  }
1340  if (s->bppcount == 1)
1341  s->bpp *= value;
1342  s->bppcount = value;
1343  break;
1344  case TIFF_COMPR:
1345  s->compr = value;
1346  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1347  s->predictor = 0;
1348  switch (s->compr) {
1349  case TIFF_RAW:
1350  case TIFF_PACKBITS:
1351  case TIFF_LZW:
1352  case TIFF_CCITT_RLE:
1353  break;
1354  case TIFF_G3:
1355  case TIFF_G4:
1356  s->fax_opts = 0;
1357  break;
1358  case TIFF_DEFLATE:
1359  case TIFF_ADOBE_DEFLATE:
1360 #if CONFIG_ZLIB
1361  break;
1362 #else
1363  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1364  return AVERROR(ENOSYS);
1365 #endif
1366  case TIFF_JPEG:
1367  case TIFF_NEWJPEG:
1368  s->is_jpeg = 1;
1369  break;
1370  case TIFF_LZMA:
1371 #if CONFIG_LZMA
1372  break;
1373 #else
1374  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1375  return AVERROR(ENOSYS);
1376 #endif
1377  default:
1378  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1379  s->compr);
1380  return AVERROR_INVALIDDATA;
1381  }
1382  break;
1383  case TIFF_ROWSPERSTRIP:
1384  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1385  value = s->height;
1386  s->rps = FFMIN(value, s->height);
1387  break;
1388  case TIFF_STRIP_OFFS:
1389  if (count == 1) {
1390  if (value > INT_MAX) {
1391  av_log(s->avctx, AV_LOG_ERROR,
1392  "strippos %u too large\n", value);
1393  return AVERROR_INVALIDDATA;
1394  }
1395  s->strippos = 0;
1396  s->stripoff = value;
1397  } else
1398  s->strippos = off;
1399  s->strips = count;
1400  if (s->strips == s->bppcount)
1401  s->rps = s->height;
1402  s->sot = type;
1403  break;
1404  case TIFF_STRIP_SIZE:
1405  if (count == 1) {
1406  if (value > INT_MAX) {
1407  av_log(s->avctx, AV_LOG_ERROR,
1408  "stripsize %u too large\n", value);
1409  return AVERROR_INVALIDDATA;
1410  }
1411  s->stripsizesoff = 0;
1412  s->stripsize = value;
1413  s->strips = 1;
1414  } else {
1415  s->stripsizesoff = off;
1416  }
1417  s->strips = count;
1418  s->sstype = type;
1419  break;
1420  case TIFF_XRES:
1421  case TIFF_YRES:
1422  set_sar(s, tag, value, value2);
1423  break;
1424  case TIFF_TILE_OFFSETS:
1425  s->tile_offsets_offset = off;
1426  s->is_tiled = 1;
1427  break;
1428  case TIFF_TILE_BYTE_COUNTS:
1429  s->tile_byte_counts_offset = off;
1430  break;
1431  case TIFF_TILE_LENGTH:
1432  s->tile_length = value;
1433  break;
1434  case TIFF_TILE_WIDTH:
1435  s->tile_width = value;
1436  break;
1437  case TIFF_PREDICTOR:
1438  s->predictor = value;
1439  break;
1440  case TIFF_SUB_IFDS:
1441  if (count == 1)
1442  s->sub_ifd = value;
1443  else if (count > 1)
1444  s->sub_ifd = ff_tget_long(&s->gb, s->le); /** Only get the first SubIFD */
1445  break;
1446  case TIFF_GRAY_RESPONSE_CURVE:
1447  case DNG_LINEARIZATION_TABLE:
1448  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1449  return AVERROR_INVALIDDATA;
1450  for (int i = 0; i < count; i++)
1451  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1452  s->white_level = s->dng_lut[count-1];
1453  break;
1454  case DNG_BLACK_LEVEL:
1455  if (count > FF_ARRAY_ELEMS(s->black_level))
1456  return AVERROR_INVALIDDATA;
1457  s->black_level[0] = value / (float)value2;
1458  for (int i = 0; i < count && count > 1; i++) {
1459  if (type == TIFF_RATIONAL) {
1460  value = ff_tget_long(&s->gb, s->le);
1461  value2 = ff_tget_long(&s->gb, s->le);
1462  if (!value2) {
1463  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1464  value2 = 1;
1465  }
1466 
1467  s->black_level[i] = value / (float)value2;
1468  } else if (type == TIFF_SRATIONAL) {
1469  int value = ff_tget_long(&s->gb, s->le);
1470  int value2 = ff_tget_long(&s->gb, s->le);
1471  if (!value2) {
1472  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1473  value2 = 1;
1474  }
1475 
1476  s->black_level[i] = value / (float)value2;
1477  } else {
1478  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1479  }
1480  }
1481  for (int i = count; i < 4 && count > 0; i++)
1482  s->black_level[i] = s->black_level[count - 1];
1483  break;
1484  case DNG_WHITE_LEVEL:
1485  s->white_level = value;
1486  break;
1487  case TIFF_CFA_PATTERN_DIM:
1488  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1489  ff_tget(&s->gb, type, s->le) != 2)) {
1490  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1491  return AVERROR_INVALIDDATA;
1492  }
1493  break;
1494  case TIFF_CFA_PATTERN:
1495  s->is_bayer = 1;
1496  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1497  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1498  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1499  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1500  break;
1501  case TIFF_PHOTOMETRIC:
1502  switch (value) {
1503  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1504  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1505  case TIFF_PHOTOMETRIC_RGB:
1506  case TIFF_PHOTOMETRIC_PALETTE:
1507  case TIFF_PHOTOMETRIC_YCBCR:
1508  case TIFF_PHOTOMETRIC_SEPARATED:
1509  case TIFF_PHOTOMETRIC_CFA:
1510  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1511  s->photometric = value;
1512  break;
1513  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1514  case TIFF_PHOTOMETRIC_CIE_LAB:
1515  case TIFF_PHOTOMETRIC_ICC_LAB:
1516  case TIFF_PHOTOMETRIC_ITU_LAB:
1517  case TIFF_PHOTOMETRIC_LOG_L:
1518  case TIFF_PHOTOMETRIC_LOG_LUV:
1519  avpriv_report_missing_feature(s->avctx,
1520  "PhotometricInterpretation 0x%04X",
1521  value);
1522  return AVERROR_PATCHWELCOME;
1523  default:
1524  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1525  "unknown\n", value);
1526  return AVERROR_INVALIDDATA;
1527  }
1528  break;
1529  case TIFF_FILL_ORDER:
1530  if (value < 1 || value > 2) {
1531  av_log(s->avctx, AV_LOG_ERROR,
1532  "Unknown FillOrder value %d, trying default one\n", value);
1533  value = 1;
1534  }
1535  s->fill_order = value - 1;
1536  break;
1537  case TIFF_PAL: {
1538  GetByteContext pal_gb[3];
1539  off = type_sizes[type];
1540  if (count / 3 > 256 ||
1541  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1542  return AVERROR_INVALIDDATA;
1543 
1544  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1545  bytestream2_skip(&pal_gb[1], count / 3 * off);
1546  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1547 
1548  off = (type_sizes[type] - 1) << 3;
1549  if (off > 31U) {
1550  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1551  return AVERROR_INVALIDDATA;
1552  }
1553 
1554  for (i = 0; i < count / 3; i++) {
1555  uint32_t p = 0xFF000000;
1556  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1557  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1558  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1559  s->palette[i] = p;
1560  }
1561  s->palette_is_set = 1;
1562  break;
1563  }
1564  case TIFF_PLANAR:
1565  s->planar = value == 2;
1566  break;
1567  case TIFF_YCBCR_SUBSAMPLING:
1568  if (count != 2) {
1569  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1570  return AVERROR_INVALIDDATA;
1571  }
1572  for (i = 0; i < count; i++) {
1573  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1574  if (s->subsampling[i] <= 0) {
1575  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1576  s->subsampling[i] = 1;
1577  return AVERROR_INVALIDDATA;
1578  }
1579  }
1580  break;
1581  case TIFF_T4OPTIONS:
1582  if (s->compr == TIFF_G3)
1583  s->fax_opts = value;
1584  break;
1585  case TIFF_T6OPTIONS:
1586  if (s->compr == TIFF_G4)
1587  s->fax_opts = value;
1588  break;
1589 #define ADD_METADATA(count, name, sep)\
1590  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1591  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1592  goto end;\
1593  }
1594  case TIFF_MODEL_PIXEL_SCALE:
1595  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1596  break;
1597  case TIFF_MODEL_TRANSFORMATION:
1598  ADD_METADATA(count, "ModelTransformationTag", NULL);
1599  break;
1600  case TIFF_MODEL_TIEPOINT:
1601  ADD_METADATA(count, "ModelTiepointTag", NULL);
1602  break;
1603  case TIFF_GEO_KEY_DIRECTORY:
1604  if (s->geotag_count) {
1605  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1606  return AVERROR_INVALIDDATA;
1607  }
1608  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1609  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1610  s->geotag_count = ff_tget_short(&s->gb, s->le);
1611  if (s->geotag_count > count / 4 - 1) {
1612  s->geotag_count = count / 4 - 1;
1613  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1614  }
1615  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1616  || s->geotag_count == 0) {
1617  s->geotag_count = 0;
1618  return -1;
1619  }
1620  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1621  if (!s->geotags) {
1622  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1623  s->geotag_count = 0;
1624  goto end;
1625  }
1626  for (i = 0; i < s->geotag_count; i++) {
1627  unsigned val;
1628  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1629  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1630  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1631  val = ff_tget_short(&s->gb, s->le);
1632 
1633  if (!s->geotags[i].type) {
1634  const char *str = get_geokey_val(s->geotags[i].key, val);
1635 
1636  s->geotags[i].val = str ? av_strdup(str) : av_asprintf("Unknown-%u", val);
1637  if (!s->geotags[i].val)
1638  return AVERROR(ENOMEM);
1639  } else
1640  s->geotags[i].offset = val;
1641  }
1642  break;
1643  case TIFF_GEO_DOUBLE_PARAMS:
1644  if (count >= INT_MAX / sizeof(int64_t))
1645  return AVERROR_INVALIDDATA;
1646  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1647  return AVERROR_INVALIDDATA;
1648  dp = av_malloc_array(count, sizeof(double));
1649  if (!dp) {
1650  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1651  goto end;
1652  }
1653  for (i = 0; i < count; i++)
1654  dp[i] = ff_tget_double(&s->gb, s->le);
1655  for (i = 0; i < s->geotag_count; i++) {
1656  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1657  if (s->geotags[i].count == 0
1658  || s->geotags[i].offset + s->geotags[i].count > count) {
1659  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1660  } else if (s->geotags[i].val) {
1661  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1662  } else {
1663  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1664  if (!ap) {
1665  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1666  av_freep(&dp);
1667  return AVERROR(ENOMEM);
1668  }
1669  s->geotags[i].val = ap;
1670  }
1671  }
1672  }
1673  av_freep(&dp);
1674  break;
1675  case TIFF_GEO_ASCII_PARAMS:
1676  pos = bytestream2_tell(&s->gb);
1677  for (i = 0; i < s->geotag_count; i++) {
1678  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1679  if (s->geotags[i].count == 0
1680  || s->geotags[i].offset + s->geotags[i].count > count) {
1681  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1682  } else {
1683  char *ap;
1684 
1685  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1686  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1687  return AVERROR_INVALIDDATA;
1688  if (s->geotags[i].val)
1689  return AVERROR_INVALIDDATA;
1690  ap = av_malloc(s->geotags[i].count);
1691  if (!ap) {
1692  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1693  return AVERROR(ENOMEM);
1694  }
1695  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1696  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1697  s->geotags[i].val = ap;
1698  }
1699  }
1700  }
1701  break;
1702  case TIFF_ICC_PROFILE:
1703  gb_temp = s->gb;
1704  bytestream2_seek(&gb_temp, off, SEEK_SET);
1705 
1706  if (bytestream2_get_bytes_left(&gb_temp) < count)
1707  return AVERROR_INVALIDDATA;
1708 
1709  ret = ff_frame_new_side_data(s->avctx, frame, AV_FRAME_DATA_ICC_PROFILE, count, &sd);
1710  if (ret < 0)
1711  return ret;
1712  if (sd)
1713  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1714  break;
1715  case TIFF_ARTIST:
1716  ADD_METADATA(count, "artist", NULL);
1717  break;
1718  case TIFF_COPYRIGHT:
1719  ADD_METADATA(count, "copyright", NULL);
1720  break;
1721  case TIFF_DATE:
1722  ADD_METADATA(count, "date", NULL);
1723  break;
1724  case TIFF_DOCUMENT_NAME:
1725  ADD_METADATA(count, "document_name", NULL);
1726  break;
1727  case TIFF_HOST_COMPUTER:
1728  ADD_METADATA(count, "computer", NULL);
1729  break;
1730  case TIFF_IMAGE_DESCRIPTION:
1731  ADD_METADATA(count, "description", NULL);
1732  break;
1733  case TIFF_MAKE:
1734  ADD_METADATA(count, "make", NULL);
1735  break;
1736  case TIFF_MODEL:
1737  ADD_METADATA(count, "model", NULL);
1738  break;
1739  case TIFF_PAGE_NAME:
1740  ADD_METADATA(count, "page_name", NULL);
1741  break;
1742  case TIFF_PAGE_NUMBER:
1743  ADD_METADATA(count, "page_number", " / ");
1744  // need to seek back to re-read the page number
1745  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1746  // read the page number
1747  s->cur_page = ff_tget_short(&s->gb, s->le);
1748  // get back to where we were before the previous seek
1749  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1750  break;
1751  case TIFF_SOFTWARE_NAME:
1752  ADD_METADATA(count, "software", NULL);
1753  break;
1754  case DNG_VERSION:
1755  if (count == 4) {
1756  unsigned int ver[4];
1757  ver[0] = ff_tget(&s->gb, type, s->le);
1758  ver[1] = ff_tget(&s->gb, type, s->le);
1759  ver[2] = ff_tget(&s->gb, type, s->le);
1760  ver[3] = ff_tget(&s->gb, type, s->le);
1761 
1762  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1763  ver[0], ver[1], ver[2], ver[3]);
1764 
1765  tiff_set_type(s, TIFF_TYPE_DNG);
1766  }
1767  break;
1768  case DNG_ANALOG_BALANCE:
1769  if (type != TIFF_RATIONAL)
1770  break;
1771 
1772  for (int i = 0; i < 3; i++) {
1773  value = ff_tget_long(&s->gb, s->le);
1774  value2 = ff_tget_long(&s->gb, s->le);
1775  if (!value2) {
1776  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1777  value2 = 1;
1778  }
1779 
1780  s->analog_balance[i] = value / (float)value2;
1781  }
1782  break;
1783  case DNG_AS_SHOT_NEUTRAL:
1784  if (type != TIFF_RATIONAL)
1785  break;
1786 
1787  for (int i = 0; i < 3; i++) {
1788  value = ff_tget_long(&s->gb, s->le);
1789  value2 = ff_tget_long(&s->gb, s->le);
1790  if (!value2) {
1791  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1792  value2 = 1;
1793  }
1794 
1795  s->as_shot_neutral[i] = value / (float)value2;
1796  }
1797  break;
1798  case DNG_AS_SHOT_WHITE_XY:
1799  if (type != TIFF_RATIONAL)
1800  break;
1801 
1802  for (int i = 0; i < 2; i++) {
1803  value = ff_tget_long(&s->gb, s->le);
1804  value2 = ff_tget_long(&s->gb, s->le);
1805  if (!value2) {
1806  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1807  value2 = 1;
1808  }
1809 
1810  s->as_shot_white[i] = value / (float)value2;
1811  }
1812  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1813  for (int i = 0; i < 3; i++) {
1814  s->as_shot_white[i] /= d65_white[i];
1815  }
1816  break;
1817  case DNG_COLOR_MATRIX1:
1818  case DNG_COLOR_MATRIX2:
1819  for (int i = 0; i < 3; i++) {
1820  for (int j = 0; j < 3; j++) {
1821  int value = ff_tget_long(&s->gb, s->le);
1822  int value2 = ff_tget_long(&s->gb, s->le);
1823  if (!value2) {
1824  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1825  value2 = 1;
1826  }
1827  s->color_matrix[i][j] = value / (float)value2;
1828  }
1829  s->use_color_matrix = 1;
1830  }
1831  break;
1832  case DNG_CAMERA_CALIBRATION1:
1833  case DNG_CAMERA_CALIBRATION2:
1834  for (int i = 0; i < 3; i++) {
1835  for (int j = 0; j < 3; j++) {
1836  int value = ff_tget_long(&s->gb, s->le);
1837  int value2 = ff_tget_long(&s->gb, s->le);
1838  if (!value2) {
1839  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1840  value2 = 1;
1841  }
1842  s->camera_calibration[i][j] = value / (float)value2;
1843  }
1844  }
1845  break;
1846  case CINEMADNG_TIME_CODES:
1847  case CINEMADNG_FRAME_RATE:
1848  case CINEMADNG_T_STOP:
1849  case CINEMADNG_REEL_NAME:
1850  case CINEMADNG_CAMERA_LABEL:
1851  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1852  break;
1853  default:
1854  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1855  av_log(s->avctx, AV_LOG_ERROR,
1856  "Unknown or unsupported tag %d/0x%0X\n",
1857  tag, tag);
1858  return AVERROR_INVALIDDATA;
1859  }
1860  }
1861 end:
1862  if (s->bpp > 128U) {
1863  av_log(s->avctx, AV_LOG_ERROR,
1864  "This format is not supported (bpp=%d, %d components)\n",
1865  s->bpp, count);
1866  s->bpp = 0;
1867  return AVERROR_INVALIDDATA;
1868  }
1869  bytestream2_seek(&s->gb, start, SEEK_SET);
1870  return 0;
1871 }
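/* Worked example (illustrative only): the DNG colour tags above are RATIONAL
 * value/value2 pairs. For DNG_AS_SHOT_WHITE_XY the stored chromaticity (x, y)
 * is completed to XYZ and normalised against the D65 white point, e.g. for
 * x = 0.3457, y = 0.3585:
 *
 *     z = 1 - 0.3457 - 0.3585 = 0.2958
 *     as_shot_white = { 0.3457 / 0.950456, 0.3585 / 1.0, 0.2958 / 1.088754 }
 *                   ~ { 0.364, 0.359, 0.272 }
 */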
1872 
1873 static const float xyz2rgb[3][3] = {
1874  { 0.412453f, 0.357580f, 0.180423f },
1875  { 0.212671f, 0.715160f, 0.072169f },
1876  { 0.019334f, 0.119193f, 0.950227f },
1877 };
1878 
1879 static void camera_xyz_coeff(TiffContext *s,
1880  float rgb2cam[3][4],
1881  double cam2xyz[4][3])
1882 {
1883  double cam2rgb[4][3], num;
1884  int i, j, k;
1885 
1886  for (i = 0; i < 3; i++) {
1887  for (j = 0; j < 3; j++) {
1888  cam2rgb[i][j] = 0.;
1889  for (k = 0; k < 3; k++)
1890  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1891  }
1892  }
1893 
1894  for (i = 0; i < 3; i++) {
1895  for (num = j = 0; j < 3; j++)
1896  num += cam2rgb[i][j];
1897  if (!num)
1898  num = 1;
1899  for (j = 0; j < 3; j++)
1900  cam2rgb[i][j] /= num;
1901  s->premultiply[i] = 1.f / num;
1902  }
1903 }
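/* Sketch (illustrative only): the helper above concatenates the camera->XYZ
 * matrix with the fixed xyz2rgb table and normalises every row to sum to 1:
 *
 *     cam2rgb = cam2xyz * xyz2rgb
 *     rowsum_i = sum_j cam2rgb[i][j];   cam2rgb[i][j] /= rowsum_i
 *     premultiply[i] = 1 / rowsum_i
 *
 * so a neutral camera response maps to neutral RGB, while the gain divided out
 * of each row is kept in premultiply[] for dng_blit() to reapply.
 */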
1904 
1905 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1906  int *got_frame, AVPacket *avpkt)
1907 {
1908  TiffContext *const s = avctx->priv_data;
1909  unsigned off, last_off = 0;
1910  int le, ret, plane, planes;
1911  int i, j, entries, stride;
1912  unsigned soff, ssize;
1913  uint8_t *dst;
1914  GetByteContext stripsizes;
1915  GetByteContext stripdata;
1916  int retry_for_subifd, retry_for_page;
1917  int is_dng;
1918  int has_tile_bits, has_strip_bits;
1919 
1920  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1921 
1922  // parse image header
1923  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1924  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1925  return ret;
1926  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1927  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1928  return AVERROR_INVALIDDATA;
1929  }
1930  s->le = le;
1931  // TIFF_BPP is not a required tag and defaults to 1
1932 
1933  s->tiff_type = TIFF_TYPE_TIFF;
1934  s->use_color_matrix = 0;
1935 again:
1936  s->is_thumbnail = 0;
1937  s->bppcount = s->bpp = 1;
1938  s->photometric = TIFF_PHOTOMETRIC_NONE;
1939  s->compr = TIFF_RAW;
1940  s->fill_order = 0;
1941  s->white_level = 0;
1942  s->is_bayer = 0;
1943  s->is_tiled = 0;
1944  s->is_jpeg = 0;
1945  s->cur_page = 0;
1946  s->last_tag = 0;
1947 
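  /* Start from an identity linearization table; a DNG LinearizationTable
     tag, if present, overwrites these entries. */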
1948  for (i = 0; i < 65536; i++)
1949  s->dng_lut[i] = i;
1950 
1951  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1952  s->black_level[i] = 0.f;
1953 
1954  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1955  s->as_shot_neutral[i] = 0.f;
1956 
1957  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1958  s->as_shot_white[i] = 1.f;
1959 
1960  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1961  s->analog_balance[i] = 1.f;
1962 
1963  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1964  s->premultiply[i] = 1.f;
1965 
1966  for (i = 0; i < 4; i++)
1967  for (j = 0; j < 4; j++)
1968  s->camera_calibration[i][j] = i == j;
1969 
1970  free_geotags(s);
1971 
1972  // Reset these offsets so we can tell if they were set this frame
1973  s->stripsizesoff = s->strippos = 0;
1974  /* parse image file directory */
1975  bytestream2_seek(&s->gb, off, SEEK_SET);
1976  entries = ff_tget_short(&s->gb, le);
1977  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1978  return AVERROR_INVALIDDATA;
1979  for (i = 0; i < entries; i++) {
1980  if ((ret = tiff_decode_tag(s, p)) < 0)
1981  return ret;
1982  }
1983 
1984  if (s->get_thumbnail && !s->is_thumbnail) {
1985  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1986  return AVERROR_EOF;
1987  }
1988 
1989  /** whether we should process this IFD's SubIFD */
1990  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1991  /** whether we should process this multi-page IFD's next page */
1992  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1993 
1994  if (retry_for_page) {
1995  // set offset to the next IFD
1996  off = ff_tget_long(&s->gb, le);
1997  } else if (retry_for_subifd) {
1998  // set offset to the SubIFD
1999  off = s->sub_ifd;
2000  }
2001 
2002  if (retry_for_subifd || retry_for_page) {
2003  if (!off) {
2004  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2005  return AVERROR_INVALIDDATA;
2006  }
2007  if (off <= last_off) {
2008  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2009  return AVERROR_INVALIDDATA;
2010  }
2011  last_off = off;
2012  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2013  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2014  return AVERROR_INVALIDDATA;
2015  }
2016  s->sub_ifd = 0;
2017  goto again;
2018  }
2019 
2020  /* At this point we've decided on which (Sub)IFD to process */
2021 
2022  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2023 
2024  for (i = 0; i<s->geotag_count; i++) {
2025  const char *keyname = get_geokey_name(s->geotags[i].key);
2026  if (!keyname) {
2027  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2028  continue;
2029  }
2030  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2031  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2032  continue;
2033  }
2034  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, AV_DICT_DONT_STRDUP_VAL);
2035  s->geotags[i].val = NULL;
2036  if (ret<0) {
2037  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2038  return ret;
2039  }
2040  }
2041 
2042  if (is_dng) {
2043  double cam2xyz[4][3];
2044  float cmatrix[3][4];
2045  float pmin = FLT_MAX;
2046  int bps;
2047 
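  /* Fold the per-channel AnalogBalance gains into the CameraCalibration
     matrix (the DNG colour model applies AnalogBalance * CameraCalibration
     * ColorMatrix). */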
2048  for (i = 0; i < 3; i++) {
2049  for (j = 0; j < 3; j++)
2050  s->camera_calibration[i][j] *= s->analog_balance[i];
2051  }
2052 
2053  if (!s->use_color_matrix) {
2054  for (i = 0; i < 3; i++) {
2055  if (s->camera_calibration[i][i])
2056  s->premultiply[i] /= s->camera_calibration[i][i];
2057  }
2058  } else {
2059  for (int c = 0; c < 3; c++) {
2060  for (i = 0; i < 3; i++) {
2061  cam2xyz[c][i] = 0.;
2062  for (j = 0; j < 3; j++)
2063  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2064  }
2065  }
2066 
2067  camera_xyz_coeff(s, cmatrix, cam2xyz);
2068  }
2069 
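  /* Normalize the white-balance gains so the smallest channel gain is 1.0. */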
2070  for (int c = 0; c < 3; c++)
2071  pmin = fminf(pmin, s->premultiply[c]);
2072 
2073  for (int c = 0; c < 3; c++)
2074  s->premultiply[c] /= pmin;
2075 
2076  if (s->bpp % s->bppcount)
2077  return AVERROR_INVALIDDATA;
2078  bps = s->bpp / s->bppcount;
2079  if (bps < 8 || bps > 32)
2080  return AVERROR_INVALIDDATA;
2081 
2082  if (s->white_level == 0)
2083  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2084 
2085  if (s->white_level <= s->black_level[0]) {
2086  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2087  s->black_level[0], s->white_level);
2088  return AVERROR_INVALIDDATA;
2089  }
2090 
2091  if (s->planar)
2092  return AVERROR_PATCHWELCOME;
2093  }
2094 
2095  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2096  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2097  return AVERROR_INVALIDDATA;
2098  }
2099 
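  /* An IFD must describe its image data either with strip tags or with tile
     tags; a mixture is rejected, except for tiled DNG where it is only
     warned about. */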
2100  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2101  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2102 
2103  if (has_tile_bits && has_strip_bits) {
2104  int tiled_dng = s->is_tiled && is_dng;
2105  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2106  if (!tiled_dng)
2107  return AVERROR_INVALIDDATA;
2108  }
2109 
2110  /* now we have the data and may start decoding */
2111  if ((ret = init_image(s, p)) <= 0)
2112  return ret;
2113 
2114  if (!s->is_tiled || has_strip_bits) {
2115  if (s->strips == 1 && !s->stripsize) {
2116  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2117  s->stripsize = avpkt->size - s->stripoff;
2118  }
2119 
2120  if (s->stripsizesoff) {
2121  if (s->stripsizesoff >= (unsigned)avpkt->size)
2122  return AVERROR_INVALIDDATA;
2123  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2124  avpkt->size - s->stripsizesoff);
2125  }
2126  if (s->strippos) {
2127  if (s->strippos >= (unsigned)avpkt->size)
2128  return AVERROR_INVALIDDATA;
2129  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2130  avpkt->size - s->strippos);
2131  }
2132 
2133  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2134  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2135  return AVERROR_INVALIDDATA;
2136  }
2137  }
2138 
2139  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2140  s->photometric == TIFF_PHOTOMETRIC_CFA) {
2141  p->color_trc = AVCOL_TRC_LINEAR;
2142  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
2143  p->color_trc = AVCOL_TRC_GAMMA22;
2144  }
2145 
2146  /* Handle DNG images with JPEG-compressed tiles */
2147 
2148  if (is_dng && s->is_tiled) {
2149  if (!s->is_jpeg) {
2150  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2151  return AVERROR_PATCHWELCOME;
2152  } else if (!s->is_bayer) {
2153  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2154  return AVERROR_PATCHWELCOME;
2155  } else {
2156  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2157  *got_frame = 1;
2158  return ret;
2159  }
2160  }
2161 
2162  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2163 
2164  planes = s->planar ? s->bppcount : 1;
2165  for (plane = 0; plane < planes; plane++) {
2166  uint8_t *five_planes = NULL;
2167  int remaining = avpkt->size;
2168  int decoded_height;
2169  stride = p->linesize[plane];
2170  dst = p->data[plane];
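  /* CMYK with alpha carries 5 samples per pixel while the RGBA output has
     only 4, so decode into a temporary buffer 5/4 of the output stride wide;
     it is converted to RGBA after the predictor pass. */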
2171  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2172  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
2173  stride = stride * 5 / 4;
2174  five_planes =
2175  dst = av_malloc(stride * s->height);
2176  if (!dst)
2177  return AVERROR(ENOMEM);
2178  }
2179  for (i = 0; i < s->height; i += s->rps) {
2180  if (i)
2181  dst += s->rps * stride;
2182  if (s->stripsizesoff)
2183  ssize = ff_tget(&stripsizes, s->sstype, le);
2184  else
2185  ssize = s->stripsize;
2186 
2187  if (s->strippos)
2188  soff = ff_tget(&stripdata, s->sot, le);
2189  else
2190  soff = s->stripoff;
2191 
2192  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2193  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2194  av_freep(&five_planes);
2195  return AVERROR_INVALIDDATA;
2196  }
2197  remaining -= ssize;
2198  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2199  FFMIN(s->rps, s->height - i))) < 0) {
2200  if (avctx->err_recognition & AV_EF_EXPLODE) {
2201  av_freep(&five_planes);
2202  return ret;
2203  }
2204  break;
2205  }
2206  }
2207  decoded_height = FFMIN(i, s->height);
2208 
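  /* TIFF predictor 2 (horizontal differencing): each sample was stored as
     the difference from the same component of the previous pixel, so rebuild
     the row with a running sum; soff is the byte distance between those
     samples. */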
2209  if (s->predictor == 2) {
2210  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2211  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2212  return AVERROR_PATCHWELCOME;
2213  }
2214  dst = five_planes ? five_planes : p->data[plane];
2215  soff = s->bpp >> 3;
2216  if (s->planar)
2217  soff = FFMAX(soff / s->bppcount, 1);
2218  ssize = s->width * soff;
2219  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2220  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2221  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2222  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2223  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2224  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2225  for (i = 0; i < decoded_height; i++) {
2226  for (j = soff; j < ssize; j += 2)
2227  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2228  dst += stride;
2229  }
2230  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2231  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2232  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2233  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2234  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2235  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2236  for (i = 0; i < decoded_height; i++) {
2237  for (j = soff; j < ssize; j += 2)
2238  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2239  dst += stride;
2240  }
2241  } else {
2242  for (i = 0; i < decoded_height; i++) {
2243  for (j = soff; j < ssize; j++)
2244  dst[j] += dst[j - soff];
2245  dst += stride;
2246  }
2247  }
2248  }
2249 
2250  /* Floating point predictor
2251  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2252  if (s->predictor == 3) {
2253  int channels = s->bppcount;
2254  int group_size;
2255  uint8_t *tmpbuf;
2256  int bpc;
2257 
2258  dst = five_planes ? five_planes : p->data[plane];
2259  soff = s->bpp >> 3;
2260  if (s->planar) {
2261  soff = FFMAX(soff / s->bppcount, 1);
2262  channels = 1;
2263  }
2264  ssize = s->width * soff;
2265  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2266  group_size = s->width * channels;
2267 
2268  tmpbuf = av_malloc(ssize);
2269  if (!tmpbuf)
2270  return AVERROR(ENOMEM);
2271 
2272  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2273  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2274  for (i = 0; i < decoded_height; i++) {
2275  /* Copy first sample byte for each channel */
2276  for (j = 0; j < channels; j++)
2277  tmpbuf[j] = dst[j];
2278 
2279  /* Decode horizontal differences */
2280  for (j = channels; j < ssize; j++)
2281  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2282 
2283  /* Recombine the shuffled bytes. The predictor splits each
2284  row into groups: first the sign/exponent byte of every
2285  float in the row, then the upper, mid and lower mantissa
2286  bytes, each in their own group. Gather one byte from each
2287  group to rebuild every floating point value. */
2288  for (j = 0; j < group_size; j++) {
2289  for (int k = 0; k < bpc; k++) {
2290  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2291  }
2292  }
2293  dst += stride;
2294  }
2295  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2296  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2297  /* Same as LE only the shuffle at the end is reversed */
2298  for (i = 0; i < decoded_height; i++) {
2299  for (j = 0; j < channels; j++)
2300  tmpbuf[j] = dst[j];
2301 
2302  for (j = channels; j < ssize; j++)
2303  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2304 
2305  for (j = 0; j < group_size; j++) {
2306  for (int k = 0; k < bpc; k++) {
2307  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2308  }
2309  }
2310  dst += stride;
2311  }
2312  } else {
2313  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2314  }
2315  av_free(tmpbuf);
2316  }
2317 
2318  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2319  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2320  dst = p->data[plane];
2321  for (i = 0; i < s->height; i++) {
2322  for (j = 0; j < stride; j++)
2323  dst[j] = c - dst[j];
2324  dst += stride;
2325  }
2326  }
2327 
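  /* Inverted CMYK(A) to RGB(A): R = (255 - C) * (255 - K) / 255, likewise
     for G and B; '* 257 >> 16' is an integer approximation of the division
     by 255. */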
2328  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2329  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2330  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2331  uint8_t *src = five_planes ? five_planes : p->data[plane];
2332  dst = p->data[plane];
2333  for (i = 0; i < s->height; i++) {
2334  for (j = 0; j < s->width; j++) {
2335  int k = 255 - src[x * j + 3];
2336  int r = (255 - src[x * j ]) * k;
2337  int g = (255 - src[x * j + 1]) * k;
2338  int b = (255 - src[x * j + 2]) * k;
2339  dst[4 * j ] = r * 257 >> 16;
2340  dst[4 * j + 1] = g * 257 >> 16;
2341  dst[4 * j + 2] = b * 257 >> 16;
2342  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2343  }
2344  src += stride;
2345  dst += p->linesize[plane];
2346  }
2347  av_freep(&five_planes);
2348  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2349  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2350  dst = p->data[plane];
2351  for (i = 0; i < s->height; i++) {
2352  for (j = 0; j < s->width; j++) {
2353  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2354  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2355  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2356  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2357  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2358  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2359  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2360  AV_WB16(dst + 8 * j + 6, 65535);
2361  }
2362  dst += p->linesize[plane];
2363  }
2364  }
2365  }
2366 
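  /* Planar RGB was decoded into R,G,B plane order; rotate the planes into
     the G,B,R order used by the GBRP/GBRAP pixel formats. */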
2367  if (s->planar && s->bppcount > 2) {
2368  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2369  FFSWAP(int, p->linesize[0], p->linesize[2]);
2370  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2371  FFSWAP(int, p->linesize[0], p->linesize[1]);
2372  }
2373 
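  /* Non-DNG Bayer data with an explicit white level: rescale so that
     white_level maps to the full 16-bit range. */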
2374  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2375  uint16_t *dst = (uint16_t *)p->data[0];
2376  for (i = 0; i < s->height; i++) {
2377  for (j = 0; j < s->width; j++)
2378  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2379  dst += stride / 2;
2380  }
2381  }
2382 
2383  p->flags |= AV_FRAME_FLAG_KEY;
2384  *got_frame = 1;
2385 
2386  return avpkt->size;
2387 }
2388 
2389 static av_cold int tiff_init(AVCodecContext *avctx)
2390 {
2391  TiffContext *s = avctx->priv_data;
2392  const AVCodec *codec;
2393  int ret;
2394 
2395  s->width = 0;
2396  s->height = 0;
2397  s->subsampling[0] =
2398  s->subsampling[1] = 1;
2399  s->avctx = avctx;
2400  ff_lzw_decode_open(&s->lzw);
2401  if (!s->lzw)
2402  return AVERROR(ENOMEM);
2403  ff_ccitt_unpack_init();
2404 
2405  /* Allocate JPEG frame */
2406  s->jpgframe = av_frame_alloc();
2407  s->jpkt = av_packet_alloc();
2408  if (!s->jpgframe || !s->jpkt)
2409  return AVERROR(ENOMEM);
2410 
2411  /* Prepare everything needed for JPEG decoding */
2412  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2413  if (!codec)
2414  return AVERROR_BUG;
2415  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2416  if (!s->avctx_mjpeg)
2417  return AVERROR(ENOMEM);
2418  s->avctx_mjpeg->flags = avctx->flags;
2419  s->avctx_mjpeg->flags2 = avctx->flags2;
2420  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2421  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2422  ret = avcodec_open2(s->avctx_mjpeg, codec, NULL);
2423  if (ret < 0) {
2424  return ret;
2425  }
2426 
2427  return 0;
2428 }
2429 
2430 static av_cold int tiff_end(AVCodecContext *avctx)
2431 {
2432  TiffContext *const s = avctx->priv_data;
2433 
2434  free_geotags(s);
2435 
2436  ff_lzw_decode_close(&s->lzw);
2437  av_freep(&s->deinvert_buf);
2438  s->deinvert_buf_size = 0;
2439  av_freep(&s->yuv_line);
2440  s->yuv_line_size = 0;
2441  av_frame_free(&s->jpgframe);
2442  av_packet_free(&s->jpkt);
2443  avcodec_free_context(&s->avctx_mjpeg);
2444  return 0;
2445 }
2446 
2447 #define OFFSET(x) offsetof(TiffContext, x)
2448 static const AVOption tiff_options[] = {
2449  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2450  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2451  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2452  { NULL },
2453 };
2454 
2455 static const AVClass tiff_decoder_class = {
2456  .class_name = "TIFF decoder",
2457  .item_name = av_default_item_name,
2458  .option = tiff_options,
2459  .version = LIBAVUTIL_VERSION_INT,
2460 };
2461 
2462 const FFCodec ff_tiff_decoder = {
2463  .p.name = "tiff",
2464  CODEC_LONG_NAME("TIFF image"),
2465  .p.type = AVMEDIA_TYPE_VIDEO,
2466  .p.id = AV_CODEC_ID_TIFF,
2467  .priv_data_size = sizeof(TiffContext),
2468  .init = tiff_init,
2469  .close = tiff_end,
2470  FF_CODEC_DECODE_CB(decode_frame),
2471  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2472  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES |
2473  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2474  .p.priv_class = &tiff_decoder_class,
2475 };
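
The private options declared above ("subimage", "thumbnail", "page") are applied through the ordinary libavcodec options mechanism. Below is a minimal sketch of that usage; the helper name decode_tiff_page and the caller-provided input buffer (assumed to be padded with AV_INPUT_BUFFER_PADDING_SIZE) are assumptions of the example, not part of tiff.c, and error handling is abbreviated.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>

/* Illustrative sketch, not part of tiff.c: decode one in-memory TIFF,
 * selecting page 2 of a multi-page file via the "page" private option. */
static AVFrame *decode_tiff_page(const uint8_t *buf, int size)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_TIFF);
    AVCodecContext *ctx  = codec ? avcodec_alloc_context3(codec) : NULL;
    AVDictionary *opts   = NULL;
    AVPacket *pkt        = av_packet_alloc();
    AVFrame *frame       = av_frame_alloc();

    if (!ctx || !pkt || !frame)
        goto fail;

    av_dict_set(&opts, "page", "2", 0);   /* pages are 1-indexed */
    if (avcodec_open2(ctx, codec, &opts) < 0)
        goto fail;

    pkt->data = (uint8_t *)buf;           /* caller-owned, padded buffer */
    pkt->size = size;
    if (avcodec_send_packet(ctx, pkt) < 0 ||
        avcodec_receive_frame(ctx, frame) < 0)
        goto fail;

    av_dict_free(&opts);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return frame;                         /* caller frees with av_frame_free() */

fail:
    av_dict_free(&opts);
    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return NULL;
}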
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:71
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:623
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:427
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:208
TiffContext::gb
GetByteContext gb
Definition: tiff.c:60
AVCodec
AVCodec.
Definition: codec.h:187
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:147
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2462
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:112
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:157
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1246
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:107
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:566
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:198
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:131
GetByteContext
Definition: bytestream.h:33
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:101
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1879
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:589
TiffContext::strippos
int strippos
Definition: tiff.c:108
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:87
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:160
OFFSET
#define OFFSET(x)
Definition: tiff.c:2447
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1420
TiffContext::sot
int sot
Definition: tiff.c:107
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:244
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1536
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:127
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:155
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2430
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
w
uint8_t w
Definition: llviddspenc.c:38
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:113
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:133
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:89
AVPacket::data
uint8_t * data
Definition: packet.h:522
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:199
AVOption
AVOption.
Definition: opt.h:346
TIFF_LONG
@ TIFF_LONG
Definition: tiff_common.h:40
b
#define b
Definition: input.c:41
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:132
FFCodec
Definition: codec_internal.h:127
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:439
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:58
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:90
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:612
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:151
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:120
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:159
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:161
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:126
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:95
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:506
TiffGeoTagKeyName
Definition: tiff.h:220
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:190
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:134
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:146
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:116
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:284
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec caps_internal and use ff_thread_get_buffer() to allocate frames. Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:119
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2448
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:69
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:203
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:51
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:194
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:119
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:104
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:72
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:80
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:392
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:465
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:129
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:965
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:84
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:54
GetBitContext
Definition: get_bits.h:108
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:145
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:118
TiffContext::tile_length
int tile_length
Definition: tiff.c:114
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:68
val
static double val(void *priv, double ch)
Definition: aeval.c:78
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:384
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:95
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:181
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:462
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:71
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:93
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:585
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:124
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:103
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:76
TIFF_SHORT
@ TIFF_SHORT
Definition: tiff_common.h:39
get_geokey_val
static const char * get_geokey_val(int key, uint16_t val)
Definition: tiff.c:185
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
TiffGeoTag
Definition: tiff.h:212
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:66
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TiffContext::rps
int rps
Definition: tiff.c:106
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:591
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:97
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:96
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:119
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:121
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
TiffContext::stripsize
int stripsize
Definition: tiff.c:108
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:287
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:559
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
TiffContext::width
int width
Definition: tiff.c:72
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:285
g
const char * g
Definition: vf_curves.c:127
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:34
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:59
fminf
float fminf(float, float)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:681
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:120
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:143
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:646
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
TIFF_STRING
@ TIFF_STRING
Definition: tiff_common.h:38
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:201
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:89
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:165
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:68
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:53
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1934
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:88
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
key
const char * key
Definition: hwcontext_opencl.c:189
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:65
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:77
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:78
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
frame
static AVFrame * frame
Definition: demux_decode.c:54
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:176
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:286
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:503
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:303
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:118
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:123
TiffContext::height
int height
Definition: tiff.c:72
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:63
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:184
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:130
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2389
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:94
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:44
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:196
TiffContext
Definition: tiff.c:57
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:403
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:85
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:59
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:193
tiff_common.h
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:67
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:111
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:90
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:197
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:98
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:505
MJpegDecodeContext
Definition: mjpegdec.h:54
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:76
RET_GEOKEY_TYPE
#define RET_GEOKEY_TYPE(TYPE, array)
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:128
TIFF_BYTE
@ TIFF_BYTE
Definition: tiff_common.h:37
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:73
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:117
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:57
TIFF_SRATIONAL
@ TIFF_SRATIONAL
Definition: tiff_common.h:46
TIFF_G3
@ TIFF_G3
Definition: tiff.h:128
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:46
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:79
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:74
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:971
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:253
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:523
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:40
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:262
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:92
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:108
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
TIFF_DOUBLE
@ TIFF_DOUBLE
Definition: tiff_common.h:48
bps
unsigned bps
Definition: movenc.c:1787
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:96
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1873
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1818
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:73
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:140
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:64
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:52
TiffContext::fill_order
int fill_order
Definition: tiff.c:83
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:92
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:78
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:55
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:410
height
#define height
TiffContext::white_level
unsigned white_level
Definition: tiff.c:100
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:108
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
TiffContext::planar
int planar
Definition: tiff.c:79
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:49
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:47
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:171
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2455
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:104
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:67
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:202
TiffContext::le
int le
Definition: tiff.c:76
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:120
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:674
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:80
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:70
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1905
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:200
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:50
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:191
TiffContext::tile_width
int tile_width
Definition: tiff.c:114
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:81
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:73
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:452
TiffContext::res
uint32_t res[4]
Definition: tiff.c:84
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:91
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:62
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:287
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:36
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:75
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:275
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1547
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:38
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:101
TiffContext::stripoff
int stripoff
Definition: tiff.c:108
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:189
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:88
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:56
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:77
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
pv
#define pv
Definition: regdef.h:60
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
tag
uint32_t tag
Definition: movenc.c:1786
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:74
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:105
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:75
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:48
planes
static const struct @383 planes[]
d65_white
static const float d65_white[3]
Definition: tiff.c:127
pos
unsigned int pos
Definition: spdifenc.c:413
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:142
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:50
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:56
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:188
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:86
AVCodecContext
main external API structure.
Definition: avcodec.h:445
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:423
TiffContext::sstype
int sstype
Definition: tiff.c:106
again
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining again
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:75
TIFF_RATIONAL
@ TIFF_RATIONAL
Definition: tiff_common.h:41
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:658
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:109
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1227
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:136
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:625
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:94
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:121
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:112
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:421
RET_GEOKEY_STR
#define RET_GEOKEY_STR(TYPE, array)
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:61
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_clip_uint16
#define av_clip_uint16
Definition: common.h:110
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:93
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:109
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
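The returned copy is heap-allocated and owned by the caller; a small hedged sketch (KeyVal and store_value are illustrative names only, and s is assumed non-NULL):
#include "libavutil/error.h"
#include "libavutil/mem.h"
typedef struct KeyVal { char *val; } KeyVal;
static int store_value(KeyVal *kv, const char *s)
{
    kv->val = av_strdup(s);      /* duplicates s; returns NULL on failure */
    if (!kv->val)
        return AVERROR(ENOMEM);
    return 0;                    /* release later with av_freep(&kv->val) */
}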
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:273
desc
const char * desc
Definition: libsvtav1.c:73
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:424
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
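A hedged sketch of the usual pattern around this unchecked ("u") reader: the caller validates the remaining input itself before copying; copy_block is an illustrative name:
#include "libavutil/error.h"
#include "bytestream.h"
static int copy_block(GetByteContext *gb, uint8_t *dst, int size)
{
    /* the *_bufferu variant does not bounds-check internally */
    if (size < 0 || bytestream2_get_bytes_left(gb) < size)
        return AVERROR_INVALIDDATA;
    bytestream2_get_bufferu(gb, dst, size);   /* copies exactly size bytes */
    return size;
}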
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1038
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:134
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:135
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:192
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:499
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:81
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
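A hedged sketch of attaching a string tag to decoded-frame metadata with this call; the key name "ExampleTag" is a placeholder, not a tag emitted by tiff.c:
#include "libavutil/dict.h"
#include "libavutil/frame.h"
static int tag_frame(AVFrame *frame, const char *value)
{
    /* with flags = 0 both key and value are copied, and an existing
     * entry under the same key is overwritten */
    return av_dict_set(&frame->metadata, "ExampleTag", value, 0);
}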
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:311
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:736
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:106
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:113
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:144
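A hedged sketch of exporting a tag's double values with this helper; the count, key name and separator below are placeholders:
#include "libavutil/dict.h"
#include "bytestream.h"
#include "tiff_common.h"
static int export_doubles(GetByteContext *gb, int le, AVDictionary **metadata)
{
    /* reads 3 doubles from gb, joins them with ", " and stores the result
     * as a string under the key "example_doubles" */
    return ff_tadd_doubles_metadata(3, "example_doubles", ", ", gb, le, metadata);
}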
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:63
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:60
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:270
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:125
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:149
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:104
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
avstring.h
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:53
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:460
TiffContext::predictor
int predictor
Definition: tiff.c:82
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:504
int
int
Definition: ffmpeg_filter.c:425
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a value of the given TIFF field type (byte, short or long) from the bytestream using given endianness.
Definition: tiff_common.c:63
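A hedged sketch of reading several SHORT-typed IFD values with this type-aware getter; read_shorts is an illustrative name and assumes the caller has already validated the entry's count and the remaining input:
#include <stdint.h>
#include "bytestream.h"
#include "tiff_common.h"
static void read_shorts(GetByteContext *gb, int le, uint16_t *out, int count)
{
    for (int i = 0; i < count; i++)
        out[i] = ff_tget(gb, TIFF_SHORT, le);  /* 2 bytes each, per the le flag */
}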
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:195
TiffContext::strips
int strips
Definition: tiff.c:106
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:157
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:118
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:103
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:288
line
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:121
AV_RB16
Read an unsigned 16-bit big-endian value (expanded from the bytestream read/write template macros).
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:110
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:141