tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
56  AVCodecContext *avctx;
57  GetByteContext gb;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62 
63  int get_subimage;
64  uint16_t get_page;
65  int get_thumbnail;
66 
67  enum TiffType tiff_type;
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
71  int palette_is_set;
72  int le;
73  enum TiffCompr compr;
74  enum TiffPhotometric photometric;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
79  int fill_order;
80  uint32_t res[4];
81  int is_thumbnail;
82  unsigned last_tag;
83 
84  int is_bayer;
85  uint8_t pattern[4];
86  unsigned black_level;
87  unsigned white_level;
88  uint16_t dng_lut[65536];
89 
90  uint32_t sub_ifd;
91  uint16_t cur_page;
92 
93  int strips, rps, sstype;
94  int sot;
95  int stripsizesoff, stripsize, stripoff, strippos;
96  LZWState *lzw;
97 
98  /* Tile support */
99  int is_tiled;
100  int tile_byte_counts_offset, tile_offsets_offset;
101  int tile_width, tile_length;
102  int tile_count;
103 
104  int is_jpeg;
105 
106  uint8_t *deinvert_buf;
107  int deinvert_buf_size;
108  uint8_t *yuv_line;
109  unsigned int yuv_line_size;
110  uint8_t *fax_buffer;
111  unsigned int fax_buffer_size;
112 
113  int geotag_count;
114  TiffGeoTag *geotags;
115 } TiffContext;
116 
117 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
118  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
119  s->tiff_type = tiff_type;
120 }
121 
122 static void free_geotags(TiffContext *const s)
123 {
124  int i;
125  for (i = 0; i < s->geotag_count; i++) {
126  if (s->geotags[i].val)
127  av_freep(&s->geotags[i].val);
128  }
129  av_freep(&s->geotags);
130  s->geotag_count = 0;
131 }
132 
133 #define RET_GEOKEY(TYPE, array, element)\
134  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
135  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
136  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
137 
138 static const char *get_geokey_name(int key)
139 {
140  RET_GEOKEY(VERT, vert, name);
141  RET_GEOKEY(PROJ, proj, name);
142  RET_GEOKEY(GEOG, geog, name);
143  RET_GEOKEY(CONF, conf, name);
144 
145  return NULL;
146 }
147 
148 static int get_geokey_type(int key)
149 {
150  RET_GEOKEY(VERT, vert, type);
151  RET_GEOKEY(PROJ, proj, type);
152  RET_GEOKEY(GEOG, geog, type);
153  RET_GEOKEY(CONF, conf, type);
154 
155  return AVERROR_INVALIDDATA;
156 }
157 
158 static int cmp_id_key(const void *id, const void *k)
159 {
160  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
161 }
162 
163 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
164 {
165  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
166  if(r)
167  return r->name;
168 
169  return NULL;
170 }
171 
172 static char *get_geokey_val(int key, int val)
173 {
174  char *ap;
175 
176  if (val == TIFF_GEO_KEY_UNDEFINED)
177  return av_strdup("undefined");
178  if (val == TIFF_GEO_KEY_USER_DEFINED)
179  return av_strdup("User-Defined");
180 
181 #define RET_GEOKEY_VAL(TYPE, array)\
182  if (val >= TIFF_##TYPE##_OFFSET &&\
183  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
184  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
185 
186  switch (key) {
187  case TIFF_GT_MODEL_TYPE_GEOKEY:
188  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
189  break;
190  case TIFF_GT_RASTER_TYPE_GEOKEY:
191  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
192  break;
193  case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
194  case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
195  case TIFF_VERTICAL_UNITS_GEOKEY:
196  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
197  break;
198  case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
199  case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
200  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
201  break;
202  case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
203  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
204  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
205  break;
206  case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
207  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
208  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
209  break;
210  case TIFF_GEOG_ELLIPSOID_GEOKEY:
211  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
212  break;
213  case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
214  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
215  break;
216  case TIFF_PROJECTED_CS_TYPE_GEOKEY:
217  ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
218  if(ap) return ap;
219  break;
220  case TIFF_PROJECTION_GEOKEY:
221  ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
222  if(ap) return ap;
223  break;
224  case TIFF_PROJ_COORD_TRANS_GEOKEY:
225  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
226  break;
227  case TIFF_VERTICAL_CS_TYPE_GEOKEY:
228  RET_GEOKEY_VAL(VERT_CS, vert_cs);
229  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
230  break;
231 
232  }
233 
234  ap = av_malloc(14);
235  if (ap)
236  snprintf(ap, 14, "Unknown-%d", val);
237  return ap;
238 }
239 
240 static char *doubles2str(double *dp, int count, const char *sep)
241 {
242  int i;
243  char *ap, *ap0;
244  uint64_t component_len;
245  if (!sep) sep = ", ";
246  component_len = 24LL + strlen(sep);
247  if (count >= (INT_MAX - 1)/component_len)
248  return NULL;
249  ap = av_malloc(component_len * count + 1);
250  if (!ap)
251  return NULL;
252  ap0 = ap;
253  ap[0] = '\0';
254  for (i = 0; i < count; i++) {
255  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
256  if(l >= component_len) {
257  av_free(ap0);
258  return NULL;
259  }
260  ap += l;
261  }
262  ap0[strlen(ap0) - strlen(sep)] = '\0';
263  return ap0;
264 }
265 
266 static int add_metadata(int count, int type,
267  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
268 {
269  switch(type) {
270  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
271  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
272  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
273  default : return AVERROR_INVALIDDATA;
274  };
275 }
276 
277 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
278  const uint8_t *src, int src_stride, int width, int height,
279  int is_single_comp, int is_u16);
280 
281 static void av_always_inline horizontal_fill(TiffContext *s,
282  unsigned int bpp, uint8_t* dst,
283  int usePtr, const uint8_t *src,
284  uint8_t c, int width, int offset)
285 {
286  switch (bpp) {
287  case 1:
288  while (--width >= 0) {
289  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
290  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
291  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
292  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
293  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
294  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
295  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
296  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
297  }
298  break;
299  case 2:
300  while (--width >= 0) {
301  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
302  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
303  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
304  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
305  }
306  break;
307  case 4:
308  while (--width >= 0) {
309  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
310  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
311  }
312  break;
313  case 10:
314  case 12:
315  case 14: {
316  uint16_t *dst16 = (uint16_t *)dst;
317  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
318  uint8_t shift = is_dng ? 0 : 16 - bpp;
319  GetBitContext gb;
320 
321  init_get_bits8(&gb, src, width);
322  for (int i = 0; i < s->width; i++) {
323  dst16[i] = get_bits(&gb, bpp) << shift;
324  }
325  }
326  break;
327  default:
328  if (usePtr) {
329  memcpy(dst + offset, src, width);
330  } else {
331  memset(dst + offset, c, width);
332  }
333  }
334 }
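/* For example, with bpp = 4 and usePtr set, a source byte 0xAB expands
 * MSB-first to dst[0] = 0xA and dst[1] = 0xB; with bpp = 1, a byte 0x80
 * expands to dst[0] = 1 followed by seven zero samples. */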
335 
336 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
337 {
338  int i;
339 
340  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
341  if (!s->deinvert_buf)
342  return AVERROR(ENOMEM);
343  for (i = 0; i < size; i++)
344  s->deinvert_buf[i] = ff_reverse[src[i]];
345 
346  return 0;
347 }
348 
349 static void unpack_gray(TiffContext *s, AVFrame *p,
350  const uint8_t *src, int lnum, int width, int bpp)
351 {
352  GetBitContext gb;
353  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
354 
355  init_get_bits8(&gb, src, width);
356 
357  for (int i = 0; i < s->width; i++) {
358  dst[i] = get_bits(&gb, bpp);
359  }
360 }
361 
362 static void unpack_yuv(TiffContext *s, AVFrame *p,
363  const uint8_t *src, int lnum)
364 {
365  int i, j, k;
366  int w = (s->width - 1) / s->subsampling[0] + 1;
367  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
368  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
369  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
370  for (i = 0; i < w; i++) {
371  for (j = 0; j < s->subsampling[1]; j++)
372  for (k = 0; k < s->subsampling[0]; k++)
373  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
374  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
375  *pu++ = *src++;
376  *pv++ = *src++;
377  }
378  }else{
379  for (i = 0; i < w; i++) {
380  for (j = 0; j < s->subsampling[1]; j++)
381  for (k = 0; k < s->subsampling[0]; k++)
382  p->data[0][(lnum + j) * p->linesize[0] +
383  i * s->subsampling[0] + k] = *src++;
384  *pu++ = *src++;
385  *pv++ = *src++;
386  }
387  }
388 }
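/* Example of the strip layout unpack_yuv() consumes: with 2x2 chroma
 * subsampling (4:2:0), each iteration of the outer loop reads
 * subsampling[0] * subsampling[1] = 4 luma samples followed by one Cb and
 * one Cr sample, i.e. a 2x2 pixel block is stored as Y Y Y Y Cb Cr. */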
389 
390 #if CONFIG_ZLIB
391 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
392  int size)
393 {
394  z_stream zstream = { 0 };
395  int zret;
396 
397  zstream.next_in = src;
398  zstream.avail_in = size;
399  zstream.next_out = dst;
400  zstream.avail_out = *len;
401  zret = inflateInit(&zstream);
402  if (zret != Z_OK) {
403  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
404  return zret;
405  }
406  zret = inflate(&zstream, Z_SYNC_FLUSH);
407  inflateEnd(&zstream);
408  *len = zstream.total_out;
409  return zret == Z_STREAM_END ? Z_OK : zret;
410 }
411 
412 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
413  const uint8_t *src, int size, int width, int lines,
414  int strip_start, int is_yuv)
415 {
416  uint8_t *zbuf;
417  unsigned long outlen;
418  int ret, line;
419  outlen = width * lines;
420  zbuf = av_malloc(outlen);
421  if (!zbuf)
422  return AVERROR(ENOMEM);
423  if (s->fill_order) {
424  if ((ret = deinvert_buffer(s, src, size)) < 0) {
425  av_free(zbuf);
426  return ret;
427  }
428  src = s->deinvert_buf;
429  }
430  ret = tiff_uncompress(zbuf, &outlen, src, size);
431  if (ret != Z_OK) {
432  av_log(s->avctx, AV_LOG_ERROR,
433  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
434  (unsigned long)width * lines, ret);
435  av_free(zbuf);
436  return AVERROR_UNKNOWN;
437  }
438  src = zbuf;
439  for (line = 0; line < lines; line++) {
440  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
441  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
442  } else {
443  memcpy(dst, src, width);
444  }
445  if (is_yuv) {
446  unpack_yuv(s, p, dst, strip_start + line);
447  line += s->subsampling[1] - 1;
448  }
449  dst += stride;
450  src += width;
451  }
452  av_free(zbuf);
453  return 0;
454 }
455 #endif
456 
457 #if CONFIG_LZMA
458 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
459  int size)
460 {
461  lzma_stream stream = LZMA_STREAM_INIT;
462  lzma_ret ret;
463 
464  stream.next_in = (uint8_t *)src;
465  stream.avail_in = size;
466  stream.next_out = dst;
467  stream.avail_out = *len;
468  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
469  if (ret != LZMA_OK) {
470  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
471  return ret;
472  }
473  ret = lzma_code(&stream, LZMA_RUN);
474  lzma_end(&stream);
475  *len = stream.total_out;
476  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
477 }
478 
479 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
480  const uint8_t *src, int size, int width, int lines,
481  int strip_start, int is_yuv)
482 {
483  uint64_t outlen = width * (uint64_t)lines;
484  int ret, line;
485  uint8_t *buf = av_malloc(outlen);
486  if (!buf)
487  return AVERROR(ENOMEM);
488  if (s->fill_order) {
489  if ((ret = deinvert_buffer(s, src, size)) < 0) {
490  av_free(buf);
491  return ret;
492  }
493  src = s->deinvert_buf;
494  }
495  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
496  if (ret != LZMA_OK) {
497  av_log(s->avctx, AV_LOG_ERROR,
498  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
499  (uint64_t)width * lines, ret);
500  av_free(buf);
501  return AVERROR_UNKNOWN;
502  }
503  src = buf;
504  for (line = 0; line < lines; line++) {
505  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
506  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
507  } else {
508  memcpy(dst, src, width);
509  }
510  if (is_yuv) {
511  unpack_yuv(s, p, dst, strip_start + line);
512  line += s->subsampling[1] - 1;
513  }
514  dst += stride;
515  src += width;
516  }
517  av_free(buf);
518  return 0;
519 }
520 #endif
521 
522 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
523  const uint8_t *src, int size, int width, int lines)
524 {
525  int i, ret = 0;
526  int line;
527  uint8_t *src2;
528 
529  av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
530  src2 = s->fax_buffer;
531 
532  if (!src2) {
533  av_log(s->avctx, AV_LOG_ERROR,
534  "Error allocating temporary buffer\n");
535  return AVERROR(ENOMEM);
536  }
537 
538  if (!s->fill_order) {
539  memcpy(src2, src, size);
540  } else {
541  for (i = 0; i < size; i++)
542  src2[i] = ff_reverse[src[i]];
543  }
544  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
545  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
546  s->compr, s->fax_opts);
547  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
548  for (line = 0; line < lines; line++) {
549  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
550  dst += stride;
551  }
552  return ret;
553 }
554 
555 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
556 
557 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
558  const uint8_t *src, int size, int strip_start, int lines)
559 {
560  PutByteContext pb;
561  int c, line, pixels, code, ret;
562  const uint8_t *ssrc = src;
563  int width = ((s->width * s->bpp) + 7) >> 3;
564  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
565  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
566  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
567  desc->nb_components >= 3;
568  int is_dng;
569 
570  if (s->planar)
571  width /= s->bppcount;
572 
573  if (size <= 0)
574  return AVERROR_INVALIDDATA;
575 
576  if (is_yuv) {
577  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
578  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
579  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
580  if (s->yuv_line == NULL) {
581  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
582  return AVERROR(ENOMEM);
583  }
584  dst = s->yuv_line;
585  stride = 0;
586 
587  width = (s->width - 1) / s->subsampling[0] + 1;
588  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
589  av_assert0(width <= bytes_per_row);
590  av_assert0(s->bpp == 24);
591  }
592  if (s->is_bayer) {
593  av_assert0(width == (s->bpp * s->width + 7) >> 3);
594  }
595  if (p->format == AV_PIX_FMT_GRAY12) {
596  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
597  if (s->yuv_line == NULL) {
598  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
599  return AVERROR(ENOMEM);
600  }
601  dst = s->yuv_line;
602  stride = 0;
603  }
604 
605  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
606 #if CONFIG_ZLIB
607  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
608  strip_start, is_yuv);
609 #else
610  av_log(s->avctx, AV_LOG_ERROR,
611  "zlib support not enabled, "
612  "deflate compression not supported\n");
613  return AVERROR(ENOSYS);
614 #endif
615  }
616  if (s->compr == TIFF_LZMA) {
617 #if CONFIG_LZMA
618  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
619  strip_start, is_yuv);
620 #else
621  av_log(s->avctx, AV_LOG_ERROR,
622  "LZMA support not enabled\n");
623  return AVERROR(ENOSYS);
624 #endif
625  }
626  if (s->compr == TIFF_LZW) {
627  if (s->fill_order) {
628  if ((ret = deinvert_buffer(s, src, size)) < 0)
629  return ret;
630  ssrc = src = s->deinvert_buf;
631  }
632  if (size > 1 && !src[0] && (src[1]&1)) {
633  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
634  }
635  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
636  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
637  return ret;
638  }
639  for (line = 0; line < lines; line++) {
640  pixels = ff_lzw_decode(s->lzw, dst, width);
641  if (pixels < width) {
642  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
643  pixels, width);
644  return AVERROR_INVALIDDATA;
645  }
646  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
647  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
648  if (is_yuv) {
649  unpack_yuv(s, p, dst, strip_start + line);
650  line += s->subsampling[1] - 1;
651  } else if (p->format == AV_PIX_FMT_GRAY12) {
652  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
653  }
654  dst += stride;
655  }
656  return 0;
657  }
658  if (s->compr == TIFF_CCITT_RLE ||
659  s->compr == TIFF_G3 ||
660  s->compr == TIFF_G4) {
661  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
662  return AVERROR_INVALIDDATA;
663 
664  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
665  }
666 
667  bytestream2_init(&s->gb, src, size);
668  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
669 
670  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
671 
672  /* Decode JPEG-encoded DNGs with strips */
673  if (s->compr == TIFF_NEWJPEG && is_dng) {
674  if (s->strips > 1) {
675  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strip is unsupported\n");
676  return AVERROR_PATCHWELCOME;
677  }
678  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
679  return ret;
680  return 0;
681  }
682 
683  if (is_dng && stride == 0)
684  return AVERROR_INVALIDDATA;
685 
686  for (line = 0; line < lines; line++) {
687  if (src - ssrc > size) {
688  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
689  return AVERROR_INVALIDDATA;
690  }
691 
692  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
693  break;
694  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
695  switch (s->compr) {
696  case TIFF_RAW:
697  if (ssrc + size - src < width)
698  return AVERROR_INVALIDDATA;
699 
700  if (!s->fill_order) {
701  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
702  dst, 1, src, 0, width, 0);
703  } else {
704  int i;
705  for (i = 0; i < width; i++)
706  dst[i] = ff_reverse[src[i]];
707  }
708 
709  /* Color processing for DNG images with uncompressed strips (non-tiled) */
710  if (is_dng) {
711  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
712 
713  is_u16 = (s->bpp / s->bppcount > 8);
714  pixel_size_bits = (is_u16 ? 16 : 8);
715  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
716 
717  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
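 // e.g. for bpp = 16, bppcount = 1: elements = width / 2, i.e. the number of u16 samples in the row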
718  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
719  dng_blit(s,
720  dst,
721  0, // no stride, only 1 line
722  dst,
723  0, // no stride, only 1 line
724  elements,
725  1,
726  0, // single-component variation is only present in JPEG-encoded DNGs
727  is_u16);
728  }
729 
730  src += width;
731  break;
732  case TIFF_PACKBITS:
733  for (pixels = 0; pixels < width;) {
734  if (ssrc + size - src < 2) {
735  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
736  return AVERROR_INVALIDDATA;
737  }
738  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
739  if (code >= 0) {
740  code++;
741  if (pixels + code > width ||
742  ssrc + size - src < code) {
743  av_log(s->avctx, AV_LOG_ERROR,
744  "Copy went out of bounds\n");
745  return AVERROR_INVALIDDATA;
746  }
747  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
748  dst, 1, src, 0, code, pixels);
749  src += code;
750  pixels += code;
751  } else if (code != -128) { // -127..-1
752  code = (-code) + 1;
753  if (pixels + code > width) {
754  av_log(s->avctx, AV_LOG_ERROR,
755  "Run went out of bounds\n");
756  return AVERROR_INVALIDDATA;
757  }
758  c = *src++;
759  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
760  dst, 0, NULL, c, code, pixels);
761  pixels += code;
762  }
763  }
764  if (s->fill_order) {
765  int i;
766  for (i = 0; i < width; i++)
767  dst[i] = ff_reverse[dst[i]];
768  }
769  break;
770  }
771  if (is_yuv) {
772  unpack_yuv(s, p, dst, strip_start + line);
773  line += s->subsampling[1] - 1;
774  } else if (p->format == AV_PIX_FMT_GRAY12) {
775  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
776  }
777  dst += stride;
778  }
779  return 0;
780 }
781 
782 /**
783  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
784  */
785 static uint16_t av_always_inline dng_process_color16(uint16_t value,
786  const uint16_t *lut,
787  uint16_t black_level,
788  float scale_factor) {
789  float value_norm;
790 
791  // Lookup table lookup
792  if (lut)
793  value = lut[value];
794 
795  // Black level subtraction
796  value = av_clip_uint16_c((unsigned)value - black_level);
797 
798  // Color scaling
799  value_norm = (float)value * scale_factor;
800 
801  value = av_clip_uint16_c(value_norm * 65535);
802 
803  return value;
804 }
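/* In effect the mapping above is
 *     out = clip_uint16( (lut[in] - black_level) * scale_factor * 65535 )
 * with scale_factor = 1 / (white_level - black_level) (see dng_blit below):
 * inputs at or below black_level map to 0 and an input at white_level maps to
 * (essentially) full scale, 65535. */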
805 
806 static uint16_t av_always_inline dng_process_color8(uint16_t value,
807  const uint16_t *lut,
808  uint16_t black_level,
809  float scale_factor) {
810  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
811 }
812 
813 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
814  const uint8_t *src, int src_stride,
815  int width, int height, int is_single_comp, int is_u16)
816 {
817  int line, col;
818  float scale_factor;
819 
820  scale_factor = 1.0f / (s->white_level - s->black_level);
821 
822  if (is_single_comp) {
823  if (!is_u16)
824  return; /* <= 8bpp unsupported */
825 
826  /* The source image is double the width and half the height of the output;
827  each source row holds two output rows (the row is split in the middle). */
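 /* For example, for a (hypothetical) 256x128 output tile the decoded JPEG
  plane is 512x64, and source row n supplies output rows 2n and 2n+1. */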
828  for (line = 0; line < height / 2; line++) {
829  uint16_t *dst_u16 = (uint16_t *)dst;
830  uint16_t *src_u16 = (uint16_t *)src;
831 
832  /* Blit first half of input row to initial row of output */
833  for (col = 0; col < width; col++)
834  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
835 
836  /* Advance the destination pointer by a row (source pointer remains in the same place) */
837  dst += dst_stride * sizeof(uint16_t);
838  dst_u16 = (uint16_t *)dst;
839 
840  /* Blit second half of input row to next row of output */
841  for (col = 0; col < width; col++)
842  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
843 
844  dst += dst_stride * sizeof(uint16_t);
845  src += src_stride * sizeof(uint16_t);
846  }
847  } else {
848  /* Input and output image are the same size and the MJpeg decoder has done per-component
849  deinterleaving, so blitting here is straightforward. */
850  if (is_u16) {
851  for (line = 0; line < height; line++) {
852  uint16_t *dst_u16 = (uint16_t *)dst;
853  uint16_t *src_u16 = (uint16_t *)src;
854 
855  for (col = 0; col < width; col++)
856  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
857 
858  dst += dst_stride * sizeof(uint16_t);
859  src += src_stride * sizeof(uint16_t);
860  }
861  } else {
862  for (line = 0; line < height; line++) {
863  uint8_t *dst_u8 = dst;
864  const uint8_t *src_u8 = src;
865 
866  for (col = 0; col < width; col++)
867  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut, s->black_level, scale_factor);
868 
869  dst += dst_stride;
870  src += src_stride;
871  }
872  }
873  }
874 }
875 
876 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
877  int tile_byte_count, int dst_x, int dst_y, int w, int h)
878 {
879  TiffContext *s = avctx->priv_data;
880  AVPacket jpkt;
881  uint8_t *dst_data, *src_data;
882  uint32_t dst_offset; /* offset from dst buffer in pixels */
883  int is_single_comp, is_u16, pixel_size;
884  int ret;
885 
886  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
887  return AVERROR_INVALIDDATA;
888 
889  /* Prepare a packet and send to the MJPEG decoder */
890  av_init_packet(&jpkt);
891  jpkt.data = (uint8_t*)s->gb.buffer;
892  jpkt.size = tile_byte_count;
893 
894  if (s->is_bayer) {
895  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
896  /* We have to set this information here, since there is no way to tell from the JPEG
897  data itself whether it is a DNG-embedded image (and the decoder needs that information). */
898  mjpegdecctx->bayer = 1;
899  }
900 
901  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
902  if (ret < 0) {
903  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
904  return ret;
905  }
906 
907  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
908  if (ret < 0) {
909  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
910 
911  /* Normally skip, error if explode */
912  if (avctx->err_recognition & AV_EF_EXPLODE)
913  return AVERROR_INVALIDDATA;
914  else
915  return 0;
916  }
917 
918  is_u16 = (s->bpp > 8);
919 
920  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
921 
922  if (s->jpgframe->width != s->avctx_mjpeg->width ||
923  s->jpgframe->height != s->avctx_mjpeg->height ||
924  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
925  return AVERROR_INVALIDDATA;
926 
927  /* See dng_blit for explanation */
928  if (s->avctx_mjpeg->width == w * 2 &&
929  s->avctx_mjpeg->height == h / 2 &&
930  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
931  is_single_comp = 1;
932  } else if (s->avctx_mjpeg->width == w &&
933  s->avctx_mjpeg->height == h &&
934  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
935  ) {
936  is_single_comp = 0;
937  } else
938  return AVERROR_INVALIDDATA;
939 
940  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
941 
942  if (is_single_comp && !is_u16) {
943  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
944  av_frame_unref(s->jpgframe);
945  return AVERROR_PATCHWELCOME;
946  }
947 
948  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
949  dst_data = frame->data[0] + dst_offset * pixel_size;
950  src_data = s->jpgframe->data[0];
951 
952  dng_blit(s,
953  dst_data,
954  frame->linesize[0] / pixel_size,
955  src_data,
956  s->jpgframe->linesize[0] / pixel_size,
957  w,
958  h,
959  is_single_comp,
960  is_u16);
961 
962  av_frame_unref(s->jpgframe);
963 
964  return 0;
965 }
966 
967 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
968 {
969  TiffContext *s = avctx->priv_data;
970  int tile_idx;
971  int tile_offset_offset, tile_offset;
972  int tile_byte_count_offset, tile_byte_count;
973  int tile_count_x, tile_count_y;
974  int tile_width, tile_length;
975  int has_width_leftover, has_height_leftover;
976  int tile_x = 0, tile_y = 0;
977  int pos_x = 0, pos_y = 0;
978  int ret;
979 
980  s->jpgframe->width = s->tile_width;
981  s->jpgframe->height = s->tile_length;
982 
983  s->avctx_mjpeg->width = s->tile_width;
984  s->avctx_mjpeg->height = s->tile_length;
985 
986  has_width_leftover = (s->width % s->tile_width != 0);
987  has_height_leftover = (s->height % s->tile_length != 0);
988 
989  /* Calculate tile counts (round up) */
990  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
991  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
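 /* e.g. a (hypothetical) 4000x3000 image with 256x256 tiles yields a 16x12
  tile grid; the right-most column is 4000 % 256 = 160 pixels wide and the
  bottom row is 3000 % 256 = 184 pixels tall. */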
992 
993  /* Iterate over the number of tiles */
994  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
995  tile_x = tile_idx % tile_count_x;
996  tile_y = tile_idx / tile_count_x;
997 
998  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
999  tile_width = s->width % s->tile_width;
1000  else
1001  tile_width = s->tile_width;
1002 
1003  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1004  tile_length = s->height % s->tile_length;
1005  else
1006  tile_length = s->tile_length;
1007 
1008  /* Read tile offset */
1009  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1010  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1011  tile_offset = ff_tget_long(&s->gb, s->le);
1012 
1013  /* Read tile byte size */
1014  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1015  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1016  tile_byte_count = ff_tget_long(&s->gb, s->le);
1017 
1018  /* Seek to tile data */
1019  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1020 
1021  /* Decode JPEG tile and copy it in the reference frame */
1022  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1023 
1024  if (ret < 0)
1025  return ret;
1026 
1027  /* Advance current positions */
1028  pos_x += tile_width;
1029  if (tile_x == tile_count_x - 1) { // If on the right edge
1030  pos_x = 0;
1031  pos_y += tile_length;
1032  }
1033  }
1034 
1035  /* Frame is ready to be output */
1036  frame->pict_type = AV_PICTURE_TYPE_I;
1037  frame->key_frame = 1;
1038 
1039  return avpkt->size;
1040 }
1041 
1042 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1043 {
1044  TiffContext *s = avctx->priv_data;
1045 
1046  s->jpgframe->width = s->width;
1047  s->jpgframe->height = s->height;
1048 
1049  s->avctx_mjpeg->width = s->width;
1050  s->avctx_mjpeg->height = s->height;
1051 
1052  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1053 }
1054 
1055 static int init_image(TiffContext *s, ThreadFrame *frame)
1056 {
1057  int ret;
1058  int create_gray_palette = 0;
1059 
1060  // make sure there is no aliasing in the following switch
1061  if (s->bpp >= 100 || s->bppcount >= 10) {
1062  av_log(s->avctx, AV_LOG_ERROR,
1063  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1064  s->bpp, s->bppcount);
1065  return AVERROR_INVALIDDATA;
1066  }
1067 
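 /* The switch below folds the format parameters into a single key:
  * is_bayer*10000 + planar*1000 + bpp*10 + bppcount. For example, 8-bit gray
  * (bpp=8, 1 component) gives 81, interleaved RGB48 (bpp=48, 3 components)
  * gives 483, and planar GBRP16 (planar=1, bpp=48, 3 components) gives 1483. */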
1068  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1069  case 11:
1070  if (!s->palette_is_set) {
1071  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1072  break;
1073  }
1074  case 21:
1075  case 41:
1076  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1077  if (!s->palette_is_set) {
1078  create_gray_palette = 1;
1079  }
1080  break;
1081  case 81:
1082  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1083  break;
1084  case 121:
1085  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1086  break;
1087  case 10081:
1088  switch (AV_RL32(s->pattern)) {
1089  case 0x02010100:
1090  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1091  break;
1092  case 0x00010102:
1093  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1094  break;
1095  case 0x01000201:
1096  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1097  break;
1098  case 0x01020001:
1099  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1100  break;
1101  default:
1102  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1103  AV_RL32(s->pattern));
1104  return AVERROR_PATCHWELCOME;
1105  }
1106  break;
1107  case 10101:
1108  case 10121:
1109  case 10141:
1110  case 10161:
1111  switch (AV_RL32(s->pattern)) {
1112  case 0x02010100:
1113  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1114  break;
1115  case 0x00010102:
1116  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1117  break;
1118  case 0x01000201:
1119  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1120  break;
1121  case 0x01020001:
1122  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1123  break;
1124  default:
1125  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1126  AV_RL32(s->pattern));
1127  return AVERROR_PATCHWELCOME;
1128  }
1129  break;
1130  case 243:
1131  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1132  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1133  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1134  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1135  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1136  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1137  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1138  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1139  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1140  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1141  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1142  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1143  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1144  } else {
1145  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1146  return AVERROR_PATCHWELCOME;
1147  }
1148  } else
1149  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1150  break;
1151  case 161:
1152  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1153  break;
1154  case 162:
1155  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1156  break;
1157  case 322:
1158  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1159  break;
1160  case 324:
1161  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1162  break;
1163  case 405:
1164  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1165  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1166  else {
1167  av_log(s->avctx, AV_LOG_ERROR,
1168  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1169  return AVERROR_PATCHWELCOME;
1170  }
1171  break;
1172  case 483:
1173  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1174  break;
1175  case 644:
1176  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1177  break;
1178  case 1243:
1179  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1180  break;
1181  case 1324:
1182  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1183  break;
1184  case 1483:
1185  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1186  break;
1187  case 1644:
1188  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1189  break;
1190  default:
1191  av_log(s->avctx, AV_LOG_ERROR,
1192  "This format is not supported (bpp=%d, bppcount=%d)\n",
1193  s->bpp, s->bppcount);
1194  return AVERROR_INVALIDDATA;
1195  }
1196 
1197  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1198  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1199  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1200  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1201  desc->nb_components < 3) {
1202  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1203  return AVERROR_INVALIDDATA;
1204  }
1205  }
1206 
1207  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1208  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1209  if (ret < 0)
1210  return ret;
1211  }
1212  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1213  return ret;
1214  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1215  if (!create_gray_palette)
1216  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1217  else {
1218  /* make default grayscale pal */
1219  int i;
1220  uint32_t *pal = (uint32_t *)frame->f->data[1];
1221  for (i = 0; i < 1<<s->bpp; i++)
1222  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
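 // e.g. with bpp = 4, palette entry 7 becomes 0xFF777777 (7 * 255 / 15 = 0x77)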
1223  }
1224  }
1225  return 0;
1226 }
1227 
1228 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1229 {
1230  int offset = tag == TIFF_YRES ? 2 : 0;
1231  s->res[offset++] = num;
1232  s->res[offset] = den;
1233  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1234  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1235  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1236  if (num > INT64_MAX || den > INT64_MAX) {
1237  num = num >> 1;
1238  den = den >> 1;
1239  }
1240  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1241  num, den, INT32_MAX);
1242  if (!s->avctx->sample_aspect_ratio.den)
1243  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1244  }
1245 }
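/* Example: XResolution = 300/1 and YResolution = 150/1 give
 * res = {300, 1, 150, 1}, so the reduced sample aspect ratio becomes
 * (150*1) / (300*1) = 1:2, i.e. each pixel is half as wide as it is tall. */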
1246 
1247 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1248 {
1249  AVFrameSideData *sd;
1250  GetByteContext gb_temp;
1251  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1252  int i, start;
1253  int pos;
1254  int ret;
1255  double *dp;
1256 
1257  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1258  if (ret < 0) {
1259  goto end;
1260  }
1261  if (tag <= s->last_tag)
1262  return AVERROR_INVALIDDATA;
1263 
1264  // TIFF_STRIP_SIZE is exempt from the tag-ordering check: some files store it out of order relative to TIFF_STRIP_OFFS
1265  if (tag != TIFF_STRIP_SIZE)
1266  s->last_tag = tag;
1267 
1268  off = bytestream2_tell(&s->gb);
1269  if (count == 1) {
1270  switch (type) {
1271  case TIFF_BYTE:
1272  case TIFF_SHORT:
1273  case TIFF_LONG:
1274  value = ff_tget(&s->gb, type, s->le);
1275  break;
1276  case TIFF_RATIONAL:
1277  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1278  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1279  if (!value2) {
1280  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1281  return AVERROR_INVALIDDATA;
1282  }
1283 
1284  break;
1285  case TIFF_STRING:
1286  if (count <= 4) {
1287  break;
1288  }
1289  default:
1290  value = UINT_MAX;
1291  }
1292  }
1293 
1294  switch (tag) {
1295  case TIFF_SUBFILE:
1296  s->is_thumbnail = (value != 0);
1297  break;
1298  case TIFF_WIDTH:
1299  s->width = value;
1300  break;
1301  case TIFF_HEIGHT:
1302  s->height = value;
1303  break;
1304  case TIFF_BPP:
1305  if (count > 5 || count <= 0) {
1306  av_log(s->avctx, AV_LOG_ERROR,
1307  "This format is not supported (bpp=%d, %d components)\n",
1308  value, count);
1309  return AVERROR_INVALIDDATA;
1310  }
1311  s->bppcount = count;
1312  if (count == 1)
1313  s->bpp = value;
1314  else {
1315  switch (type) {
1316  case TIFF_BYTE:
1317  case TIFF_SHORT:
1318  case TIFF_LONG:
1319  s->bpp = 0;
1320  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1321  return AVERROR_INVALIDDATA;
1322  for (i = 0; i < count; i++)
1323  s->bpp += ff_tget(&s->gb, type, s->le);
1324  break;
1325  default:
1326  s->bpp = -1;
1327  }
1328  }
1329  break;
1330  case TIFF_SAMPLES_PER_PIXEL:
1331  if (count != 1) {
1332  av_log(s->avctx, AV_LOG_ERROR,
1333  "Samples per pixel requires a single value, many provided\n");
1334  return AVERROR_INVALIDDATA;
1335  }
1336  if (value > 5 || value <= 0) {
1337  av_log(s->avctx, AV_LOG_ERROR,
1338  "Invalid samples per pixel %d\n", value);
1339  return AVERROR_INVALIDDATA;
1340  }
1341  if (s->bppcount == 1)
1342  s->bpp *= value;
1343  s->bppcount = value;
1344  break;
1345  case TIFF_COMPR:
1346  s->compr = value;
1347  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1348  s->predictor = 0;
1349  switch (s->compr) {
1350  case TIFF_RAW:
1351  case TIFF_PACKBITS:
1352  case TIFF_LZW:
1353  case TIFF_CCITT_RLE:
1354  break;
1355  case TIFF_G3:
1356  case TIFF_G4:
1357  s->fax_opts = 0;
1358  break;
1359  case TIFF_DEFLATE:
1360  case TIFF_ADOBE_DEFLATE:
1361 #if CONFIG_ZLIB
1362  break;
1363 #else
1364  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1365  return AVERROR(ENOSYS);
1366 #endif
1367  case TIFF_JPEG:
1368  case TIFF_NEWJPEG:
1369  s->is_jpeg = 1;
1370  break;
1371  case TIFF_LZMA:
1372 #if CONFIG_LZMA
1373  break;
1374 #else
1375  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1376  return AVERROR(ENOSYS);
1377 #endif
1378  default:
1379  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1380  s->compr);
1381  return AVERROR_INVALIDDATA;
1382  }
1383  break;
1384  case TIFF_ROWSPERSTRIP:
1385  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1386  value = s->height;
1387  s->rps = FFMIN(value, s->height);
1388  break;
1389  case TIFF_STRIP_OFFS:
1390  if (count == 1) {
1391  if (value > INT_MAX) {
1392  av_log(s->avctx, AV_LOG_ERROR,
1393  "strippos %u too large\n", value);
1394  return AVERROR_INVALIDDATA;
1395  }
1396  s->strippos = 0;
1397  s->stripoff = value;
1398  } else
1399  s->strippos = off;
1400  s->strips = count;
1401  if (s->strips == 1)
1402  s->rps = s->height;
1403  s->sot = type;
1404  break;
1405  case TIFF_STRIP_SIZE:
1406  if (count == 1) {
1407  if (value > INT_MAX) {
1408  av_log(s->avctx, AV_LOG_ERROR,
1409  "stripsize %u too large\n", value);
1410  return AVERROR_INVALIDDATA;
1411  }
1412  s->stripsizesoff = 0;
1413  s->stripsize = value;
1414  s->strips = 1;
1415  } else {
1416  s->stripsizesoff = off;
1417  }
1418  s->strips = count;
1419  s->sstype = type;
1420  break;
1421  case TIFF_XRES:
1422  case TIFF_YRES:
1423  set_sar(s, tag, value, value2);
1424  break;
1425  case TIFF_TILE_OFFSETS:
1426  s->tile_offsets_offset = off;
1427  s->tile_count = count;
1428  s->is_tiled = 1;
1429  break;
1430  case TIFF_TILE_BYTE_COUNTS:
1431  s->tile_byte_counts_offset = off;
1432  break;
1433  case TIFF_TILE_LENGTH:
1434  s->tile_length = value;
1435  break;
1436  case TIFF_TILE_WIDTH:
1437  s->tile_width = value;
1438  break;
1439  case TIFF_PREDICTOR:
1440  s->predictor = value;
1441  break;
1442  case TIFF_SUB_IFDS:
1443  if (count == 1)
1444  s->sub_ifd = value;
1445  else if (count > 1)
1446  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1447  break;
1448  case DNG_LINEARIZATION_TABLE:
1449  if (count > FF_ARRAY_ELEMS(s->dng_lut))
1450  return AVERROR_INVALIDDATA;
1451  for (int i = 0; i < count; i++)
1452  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1453  break;
1454  case DNG_BLACK_LEVEL:
1455  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1456  if (type == TIFF_RATIONAL) {
1457  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1458  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1459  if (!value2) {
1460  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1461  return AVERROR_INVALIDDATA;
1462  }
1463 
1464  s->black_level = value / value2;
1465  } else
1466  s->black_level = ff_tget(&s->gb, type, s->le);
1467  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1468  } else {
1469  s->black_level = value / value2;
1470  }
1471  break;
1472  case DNG_WHITE_LEVEL:
1473  s->white_level = value;
1474  break;
1475  case TIFF_CFA_PATTERN_DIM:
1476  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1477  ff_tget(&s->gb, type, s->le) != 2)) {
1478  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1479  return AVERROR_INVALIDDATA;
1480  }
1481  break;
1482  case TIFF_CFA_PATTERN:
1483  s->is_bayer = 1;
1484  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1485  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1486  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1487  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1488  break;
1489  case TIFF_PHOTOMETRIC:
1490  switch (value) {
1491  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1492  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1493  case TIFF_PHOTOMETRIC_RGB:
1494  case TIFF_PHOTOMETRIC_PALETTE:
1495  case TIFF_PHOTOMETRIC_YCBCR:
1496  case TIFF_PHOTOMETRIC_SEPARATED:
1497  case TIFF_PHOTOMETRIC_CFA:
1498  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1499  s->photometric = value;
1500  break;
1501  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1502  case TIFF_PHOTOMETRIC_CIE_LAB:
1503  case TIFF_PHOTOMETRIC_ICC_LAB:
1504  case TIFF_PHOTOMETRIC_ITU_LAB:
1505  case TIFF_PHOTOMETRIC_LOG_L:
1506  case TIFF_PHOTOMETRIC_LOG_LUV:
1507  avpriv_report_missing_feature(s->avctx,
1508  "PhotometricInterpretation 0x%04X",
1509  value);
1510  return AVERROR_PATCHWELCOME;
1511  default:
1512  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1513  "unknown\n", value);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  break;
1517  case TIFF_FILL_ORDER:
1518  if (value < 1 || value > 2) {
1519  av_log(s->avctx, AV_LOG_ERROR,
1520  "Unknown FillOrder value %d, trying default one\n", value);
1521  value = 1;
1522  }
1523  s->fill_order = value - 1;
1524  break;
1525  case TIFF_PAL: {
1526  GetByteContext pal_gb[3];
1527  off = type_sizes[type];
1528  if (count / 3 > 256 ||
1529  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1530  return AVERROR_INVALIDDATA;
1531 
1532  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1533  bytestream2_skip(&pal_gb[1], count / 3 * off);
1534  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1535 
1536  off = (type_sizes[type] - 1) << 3;
1537  if (off > 31U) {
1538  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1539  return AVERROR_INVALIDDATA;
1540  }
1541 
1542  for (i = 0; i < count / 3; i++) {
1543  uint32_t p = 0xFF000000;
1544  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1545  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1546  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1547  s->palette[i] = p;
1548  }
1549  s->palette_is_set = 1;
1550  break;
1551  }
1552  case TIFF_PLANAR:
1553  s->planar = value == 2;
1554  break;
1555  case TIFF_YCBCR_SUBSAMPLING:
1556  if (count != 2) {
1557  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1558  return AVERROR_INVALIDDATA;
1559  }
1560  for (i = 0; i < count; i++) {
1561  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1562  if (s->subsampling[i] <= 0) {
1563  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1564  s->subsampling[i] = 1;
1565  return AVERROR_INVALIDDATA;
1566  }
1567  }
1568  break;
1569  case TIFF_T4OPTIONS:
1570  if (s->compr == TIFF_G3)
1571  s->fax_opts = value;
1572  break;
1573  case TIFF_T6OPTIONS:
1574  if (s->compr == TIFF_G4)
1575  s->fax_opts = value;
1576  break;
1577 #define ADD_METADATA(count, name, sep)\
1578  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1579  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1580  goto end;\
1581  }
1582  case TIFF_MODEL_PIXEL_SCALE:
1583  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1584  break;
1585  case TIFF_MODEL_TRANSFORMATION:
1586  ADD_METADATA(count, "ModelTransformationTag", NULL);
1587  break;
1588  case TIFF_MODEL_TIEPOINT:
1589  ADD_METADATA(count, "ModelTiepointTag", NULL);
1590  break;
1591  case TIFF_GEO_KEY_DIRECTORY:
1592  if (s->geotag_count) {
1593  avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
1594  return AVERROR_INVALIDDATA;
1595  }
1596  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1597  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1598  s->geotag_count = ff_tget_short(&s->gb, s->le);
1599  if (s->geotag_count > count / 4 - 1) {
1600  s->geotag_count = count / 4 - 1;
1601  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1602  }
1603  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1604  || s->geotag_count == 0) {
1605  s->geotag_count = 0;
1606  return -1;
1607  }
1608  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1609  if (!s->geotags) {
1610  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1611  s->geotag_count = 0;
1612  goto end;
1613  }
1614  for (i = 0; i < s->geotag_count; i++) {
1615  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1616  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1617  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1618 
1619  if (!s->geotags[i].type)
1620  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1621  else
1622  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1623  }
1624  break;
1625  case TIFF_GEO_DOUBLE_PARAMS:
1626  if (count >= INT_MAX / sizeof(int64_t))
1627  return AVERROR_INVALIDDATA;
1628  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1629  return AVERROR_INVALIDDATA;
1630  dp = av_malloc_array(count, sizeof(double));
1631  if (!dp) {
1632  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1633  goto end;
1634  }
1635  for (i = 0; i < count; i++)
1636  dp[i] = ff_tget_double(&s->gb, s->le);
1637  for (i = 0; i < s->geotag_count; i++) {
1638  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1639  if (s->geotags[i].count == 0
1640  || s->geotags[i].offset + s->geotags[i].count > count) {
1641  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1642  } else if (s->geotags[i].val) {
1643  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1644  } else {
1645  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1646  if (!ap) {
1647  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1648  av_freep(&dp);
1649  return AVERROR(ENOMEM);
1650  }
1651  s->geotags[i].val = ap;
1652  }
1653  }
1654  }
1655  av_freep(&dp);
1656  break;
1657  case TIFF_GEO_ASCII_PARAMS:
1658  pos = bytestream2_tell(&s->gb);
1659  for (i = 0; i < s->geotag_count; i++) {
1660  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1661  if (s->geotags[i].count == 0
1662  || s->geotags[i].offset + s->geotags[i].count > count) {
1663  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1664  } else {
1665  char *ap;
1666 
1667  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1668  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1669  return AVERROR_INVALIDDATA;
1670  if (s->geotags[i].val)
1671  return AVERROR_INVALIDDATA;
1672  ap = av_malloc(s->geotags[i].count);
1673  if (!ap) {
1674  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1675  return AVERROR(ENOMEM);
1676  }
1677  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1678  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1679  s->geotags[i].val = ap;
1680  }
1681  }
1682  }
1683  break;
1684  case TIFF_ICC_PROFILE:
1685  if (type != TIFF_UNDEFINED)
1686  return AVERROR_INVALIDDATA;
1687 
1688  gb_temp = s->gb;
1689  bytestream2_seek(&gb_temp, off, SEEK_SET);
1690 
1691  if (bytestream2_get_bytes_left(&gb_temp) < count)
1692  return AVERROR_INVALIDDATA;
1693 
1694  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
1695  if (!sd)
1696  return AVERROR(ENOMEM);
1697 
1698  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1699  break;
1700  case TIFF_ARTIST:
1701  ADD_METADATA(count, "artist", NULL);
1702  break;
1703  case TIFF_COPYRIGHT:
1704  ADD_METADATA(count, "copyright", NULL);
1705  break;
1706  case TIFF_DATE:
1707  ADD_METADATA(count, "date", NULL);
1708  break;
1709  case TIFF_DOCUMENT_NAME:
1710  ADD_METADATA(count, "document_name", NULL);
1711  break;
1712  case TIFF_HOST_COMPUTER:
1713  ADD_METADATA(count, "computer", NULL);
1714  break;
1715  case TIFF_IMAGE_DESCRIPTION:
1716  ADD_METADATA(count, "description", NULL);
1717  break;
1718  case TIFF_MAKE:
1719  ADD_METADATA(count, "make", NULL);
1720  break;
1721  case TIFF_MODEL:
1722  ADD_METADATA(count, "model", NULL);
1723  break;
1724  case TIFF_PAGE_NAME:
1725  ADD_METADATA(count, "page_name", NULL);
1726  break;
1727  case TIFF_PAGE_NUMBER:
1728  ADD_METADATA(count, "page_number", " / ");
1729  // need to seek back to re-read the page number
1730  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1731  // read the page number
1732  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1733  // get back to where we were before the previous seek
1734  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1735  break;
1736  case TIFF_SOFTWARE_NAME:
1737  ADD_METADATA(count, "software", NULL);
1738  break;
1739  case DNG_VERSION:
1740  if (count == 4) {
1741  unsigned int ver[4];
1742  ver[0] = ff_tget(&s->gb, type, s->le);
1743  ver[1] = ff_tget(&s->gb, type, s->le);
1744  ver[2] = ff_tget(&s->gb, type, s->le);
1745  ver[3] = ff_tget(&s->gb, type, s->le);
1746 
1747  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1748  ver[0], ver[1], ver[2], ver[3]);
1749 
1750  tiff_set_type(s, TIFF_TYPE_DNG);
1751  }
1752  break;
1753  case CINEMADNG_TIME_CODES:
1754  case CINEMADNG_FRAME_RATE:
1755  case CINEMADNG_T_STOP:
1756  case CINEMADNG_REEL_NAME:
1757  case CINEMADNG_CAMERA_LABEL:
1758  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1759  break;
1760  default:
1761  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1762  av_log(s->avctx, AV_LOG_ERROR,
1763  "Unknown or unsupported tag %d/0x%0X\n",
1764  tag, tag);
1765  return AVERROR_INVALIDDATA;
1766  }
1767  }
1768 end:
1769  if (s->bpp > 64U) {
1770  av_log(s->avctx, AV_LOG_ERROR,
1771  "This format is not supported (bpp=%d, %d components)\n",
1772  s->bpp, count);
1773  s->bpp = 0;
1774  return AVERROR_INVALIDDATA;
1775  }
1776  bytestream2_seek(&s->gb, start, SEEK_SET);
1777  return 0;
1778 }
1779 
1780 static int decode_frame(AVCodecContext *avctx,
1781  void *data, int *got_frame, AVPacket *avpkt)
1782 {
1783  TiffContext *const s = avctx->priv_data;
1784  AVFrame *const p = data;
1785  ThreadFrame frame = { .f = data };
1786  unsigned off, last_off;
1787  int le, ret, plane, planes;
1788  int i, j, entries, stride;
1789  unsigned soff, ssize;
1790  uint8_t *dst;
1791  GetByteContext stripsizes;
1792  GetByteContext stripdata;
1793  int retry_for_subifd, retry_for_page;
1794  int is_dng;
1795  int has_tile_bits, has_strip_bits;
1796 
1797  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1798 
1799  // parse image header
1800  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1801  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1802  return ret;
1803  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1804  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1805  return AVERROR_INVALIDDATA;
1806  }
1807  s->le = le;
1808  // TIFF_BPP is not a required tag and defaults to 1
1809 
1810  s->tiff_type = TIFF_TYPE_TIFF;
1811 again:
1812  s->is_thumbnail = 0;
1813  s->bppcount = s->bpp = 1;
1814  s->photometric = TIFF_PHOTOMETRIC_NONE;
1815  s->compr = TIFF_RAW;
1816  s->fill_order = 0;
1817  s->white_level = 0;
1818  s->is_bayer = 0;
1819  s->is_tiled = 0;
1820  s->is_jpeg = 0;
1821  s->cur_page = 0;
1822  s->last_tag = 0;
1823 
1824  for (i = 0; i < 65536; i++)
1825  s->dng_lut[i] = i;
1826 
1827  free_geotags(s);
1828 
1829  // Reset these offsets so we can tell if they were set this frame
1830  s->stripsizesoff = s->strippos = 0;
1831  /* parse image file directory */
1832  bytestream2_seek(&s->gb, off, SEEK_SET);
1833  entries = ff_tget_short(&s->gb, le);
1834  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1835  return AVERROR_INVALIDDATA;
1836  for (i = 0; i < entries; i++) {
1837  if ((ret = tiff_decode_tag(s, p)) < 0)
1838  return ret;
1839  }
1840 
1841  if (s->get_thumbnail && !s->is_thumbnail) {
1842  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1843  return AVERROR_EOF;
1844  }
1845 
1846  /** whether we should process this IFD's SubIFD */
1847  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1848  /** whether we should process this multi-page IFD's next page */
1849  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1850 
1851  last_off = off;
1852  if (retry_for_page) {
1853  // set offset to the next IFD
1854  off = ff_tget_long(&s->gb, le);
1855  } else if (retry_for_subifd) {
1856  // set offset to the SubIFD
1857  off = s->sub_ifd;
1858  }
1859 
1860  if (retry_for_subifd || retry_for_page) {
1861  if (!off) {
1862  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1863  return AVERROR_INVALIDDATA;
1864  }
1865  if (off <= last_off) {
1866  avpriv_request_sample(s->avctx, "non increasing IFD offset\n");
1867  return AVERROR_INVALIDDATA;
1868  }
1869  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1870  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1871  return AVERROR_INVALIDDATA;
1872  }
1873  s->sub_ifd = 0;
1874  goto again;
1875  }
1876 
1877  /* At this point we've decided on which (Sub)IFD to process */
1878 
1879  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1880 
1881  for (i = 0; i<s->geotag_count; i++) {
1882  const char *keyname = get_geokey_name(s->geotags[i].key);
1883  if (!keyname) {
1884  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1885  continue;
1886  }
1887  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1888  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1889  continue;
1890  }
1891  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1892  if (ret<0) {
1893  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1894  return ret;
1895  }
1896  }
1897 
1898  if (is_dng) {
1899  int bps;
1900 
1901  if (s->bpp % s->bppcount)
1902  return AVERROR_INVALIDDATA;
1903  bps = s->bpp / s->bppcount;
1904  if (bps < 8 || bps > 32)
1905  return AVERROR_INVALIDDATA;
1906 
1907  if (s->white_level == 0)
1908  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
1909 
1910  if (s->white_level <= s->black_level) {
1911  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRIu32") must be less than WhiteLevel (%"PRIu32")\n",
1912  s->black_level, s->white_level);
1913  return AVERROR_INVALIDDATA;
1914  }
1915 
1916  if (s->planar)
1917  return AVERROR_PATCHWELCOME;
1918  }
1919 
1920  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1921  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1922  return AVERROR_INVALIDDATA;
1923  }
1924 
1925  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
1926  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
1927 
1928  if (has_tile_bits && has_strip_bits) {
1929  av_log(avctx, AV_LOG_ERROR, "Tiled TIFF must not also contain strip data\n");
1930  return AVERROR_INVALIDDATA;
1931  }
1932 
1933  /* now we have the data and may start decoding */
1934  if ((ret = init_image(s, &frame)) < 0)
1935  return ret;
1936 
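 /* For strip-based images, point byte readers at the StripByteCounts and
  * StripOffsets arrays (when they were stored as arrays rather than as a
  * single inline value) so each strip can be located in the loop below. */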
1937  if (!s->is_tiled) {
1938  if (s->strips == 1 && !s->stripsize) {
1939  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1940  s->stripsize = avpkt->size - s->stripoff;
1941  }
1942 
1943  if (s->stripsizesoff) {
1944  if (s->stripsizesoff >= (unsigned)avpkt->size)
1945  return AVERROR_INVALIDDATA;
1946  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1947  avpkt->size - s->stripsizesoff);
1948  }
1949  if (s->strippos) {
1950  if (s->strippos >= (unsigned)avpkt->size)
1951  return AVERROR_INVALIDDATA;
1952  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1953  avpkt->size - s->strippos);
1954  }
1955 
1956  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1957  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1958  return AVERROR_INVALIDDATA;
1959  }
1960  }
1961 
1962  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
1963  s->photometric == TIFF_PHOTOMETRIC_CFA) {
1964  p->color_trc = AVCOL_TRC_LINEAR;
1965  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1966  p->color_trc = AVCOL_TRC_GAMMA22;
1967  }
1968 
1969  /* Handle DNG images with JPEG-compressed tiles */
1970 
1971  if (is_dng && s->is_tiled) {
1972  if (!s->is_jpeg) {
1973  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1974  return AVERROR_PATCHWELCOME;
1975  } else if (!s->is_bayer) {
1976  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1977  return AVERROR_PATCHWELCOME;
1978  } else {
1979  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1980  *got_frame = 1;
1981  return ret;
1982  }
1983  }
1984 
1985  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1986 
1987  planes = s->planar ? s->bppcount : 1;
1988  for (plane = 0; plane < planes; plane++) {
1989  uint8_t *five_planes = NULL;
1990  int remaining = avpkt->size;
1991  int decoded_height;
1992  stride = p->linesize[plane];
1993  dst = p->data[plane];
1994  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1995  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1996  stride = stride * 5 / 4;
1997  five_planes =
1998  dst = av_malloc(stride * s->height);
1999  if (!dst)
2000  return AVERROR(ENOMEM);
2001  }
2002  for (i = 0; i < s->height; i += s->rps) {
2003  if (i)
2004  dst += s->rps * stride;
2005  if (s->stripsizesoff)
2006  ssize = ff_tget(&stripsizes, s->sstype, le);
2007  else
2008  ssize = s->stripsize;
2009 
2010  if (s->strippos)
2011  soff = ff_tget(&stripdata, s->sot, le);
2012  else
2013  soff = s->stripoff;
2014 
2015  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2016  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2017  av_freep(&five_planes);
2018  return AVERROR_INVALIDDATA;
2019  }
2020  remaining -= ssize;
2021  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2022  FFMIN(s->rps, s->height - i))) < 0) {
2023  if (avctx->err_recognition & AV_EF_EXPLODE) {
2024  av_freep(&five_planes);
2025  return ret;
2026  }
2027  break;
2028  }
2029  }
2030  decoded_height = FFMIN(i, s->height);
2031 
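 /* TIFF predictor 2 is horizontal differencing: each sample was stored as
  * the difference from the sample to its left, so decoding adds the previous
  * sample back. soff is the distance in bytes between two samples of the
  * same component; 16-bit formats need explicit little/big-endian loads. */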
2032  if (s->predictor == 2) {
2033  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2034  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported\n");
2035  return AVERROR_PATCHWELCOME;
2036  }
2037  dst = five_planes ? five_planes : p->data[plane];
2038  soff = s->bpp >> 3;
2039  if (s->planar)
2040  soff = FFMAX(soff / s->bppcount, 1);
2041  ssize = s->width * soff;
2042  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2043  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2044  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2045  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2046  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2047  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2048  for (i = 0; i < decoded_height; i++) {
2049  for (j = soff; j < ssize; j += 2)
2050  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2051  dst += stride;
2052  }
2053  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2054  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2055  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2056  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2057  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2058  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2059  for (i = 0; i < decoded_height; i++) {
2060  for (j = soff; j < ssize; j += 2)
2061  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2062  dst += stride;
2063  }
2064  } else {
2065  for (i = 0; i < decoded_height; i++) {
2066  for (j = soff; j < ssize; j++)
2067  dst[j] += dst[j - soff];
2068  dst += stride;
2069  }
2070  }
2071  }
2072 
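 /* PhotometricInterpretation "white is zero" stores white as 0, so invert
  * every sample against the maximum code value. */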
2073  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2074  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2075  dst = p->data[plane];
2076  for (i = 0; i < s->height; i++) {
2077  for (j = 0; j < stride; j++)
2078  dst[j] = c - dst[j];
2079  dst += stride;
2080  }
2081  }
2082 
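 /* Convert CMYK to RGB: R = (255 - C) * (255 - K) / 255, with the division
  * by 255 approximated as (x * 257) >> 16 (and by (x * 65537) >> 32 in the
  * 16-bit branch below). */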
2083  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2084  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2085  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2086  uint8_t *src = five_planes ? five_planes : p->data[plane];
2087  dst = p->data[plane];
2088  for (i = 0; i < s->height; i++) {
2089  for (j = 0; j < s->width; j++) {
2090  int k = 255 - src[x * j + 3];
2091  int r = (255 - src[x * j ]) * k;
2092  int g = (255 - src[x * j + 1]) * k;
2093  int b = (255 - src[x * j + 2]) * k;
2094  dst[4 * j ] = r * 257 >> 16;
2095  dst[4 * j + 1] = g * 257 >> 16;
2096  dst[4 * j + 2] = b * 257 >> 16;
2097  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2098  }
2099  src += stride;
2100  dst += p->linesize[plane];
2101  }
2102  av_freep(&five_planes);
2103  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2104  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2105  dst = p->data[plane];
2106  for (i = 0; i < s->height; i++) {
2107  for (j = 0; j < s->width; j++) {
2108  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2109  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2110  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2111  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2112  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2113  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2114  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2115  AV_WB16(dst + 8 * j + 6, 65535);
2116  }
2117  dst += p->linesize[plane];
2118  }
2119  }
2120  }
2121 
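 /* Planar RGB decodes into planes in R, G, B order, while the GBRP pixel
  * formats expect G, B, R; the two swaps reorder the plane pointers. */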
2122  if (s->planar && s->bppcount > 2) {
2123  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2124  FFSWAP(int, p->linesize[0], p->linesize[2]);
2125  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2126  FFSWAP(int, p->linesize[0], p->linesize[1]);
2127  }
2128 
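 /* Non-DNG Bayer data with a WhiteLevel below the full 16-bit range is
  * rescaled so that white_level maps to 65535. */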
2129  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2130  uint16_t *dst = (uint16_t *)p->data[0];
2131  for (i = 0; i < s->height; i++) {
2132  for (j = 0; j < s->width; j++)
2133  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2134  dst += stride / 2;
2135  }
2136  }
2137 
2138  *got_frame = 1;
2139 
2140  return avpkt->size;
2141 }
2142 
2143 static av_cold int tiff_init(AVCodecContext *avctx)
2144 {
2145  TiffContext *s = avctx->priv_data;
2146  const AVCodec *codec;
2147  int ret;
2148 
2149  s->width = 0;
2150  s->height = 0;
2151  s->subsampling[0] =
2152  s->subsampling[1] = 1;
2153  s->avctx = avctx;
2154  ff_lzw_decode_open(&s->lzw);
2155  if (!s->lzw)
2156  return AVERROR(ENOMEM);
2157  ff_ccitt_unpack_init();
2158 
2159  /* Allocate JPEG frame */
2160  s->jpgframe = av_frame_alloc();
2161  if (!s->jpgframe)
2162  return AVERROR(ENOMEM);
2163 
2164  /* Prepare everything needed for JPEG decoding */
2165  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2166  if (!codec)
2167  return AVERROR_BUG;
2168  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2169  if (!s->avctx_mjpeg)
2170  return AVERROR(ENOMEM);
2171  s->avctx_mjpeg->flags = avctx->flags;
2172  s->avctx_mjpeg->flags2 = avctx->flags2;
2173  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2174  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2175  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2176  if (ret < 0) {
2177  return ret;
2178  }
2179 
2180  return 0;
2181 }
2182 
2183 static av_cold int tiff_end(AVCodecContext *avctx)
2184 {
2185  TiffContext *const s = avctx->priv_data;
2186 
2187  free_geotags(s);
2188 
2189  ff_lzw_decode_close(&s->lzw);
2190  av_freep(&s->deinvert_buf);
2191  s->deinvert_buf_size = 0;
2192  av_freep(&s->yuv_line);
2193  s->yuv_line_size = 0;
2194  av_freep(&s->fax_buffer);
2195  s->fax_buffer_size = 0;
2196  av_frame_free(&s->jpgframe);
2197  avcodec_free_context(&s->avctx_mjpeg);
2198  return 0;
2199 }
2200 
2201 #define OFFSET(x) offsetof(TiffContext, x)
2202 static const AVOption tiff_options[] = {
2203  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2204  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2205  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2206  { NULL },
2207 };
2208 
2209 static const AVClass tiff_decoder_class = {
2210  .class_name = "TIFF decoder",
2211  .item_name = av_default_item_name,
2212  .option = tiff_options,
2213  .version = LIBAVUTIL_VERSION_INT,
2214 };
2215 
2216 AVCodec ff_tiff_decoder = {
2217  .name = "tiff",
2218  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2219  .type = AVMEDIA_TYPE_VIDEO,
2220  .id = AV_CODEC_ID_TIFF,
2221  .priv_data_size = sizeof(TiffContext),
2222  .init = tiff_init,
2223  .close = tiff_end,
2224  .decode = decode_frame,
2225  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2226  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2227  .priv_class = &tiff_decoder_class,
2228 };
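
The "subimage", "thumbnail" and "page" options declared above are ordinary decoder AVOptions, so a caller can pass them in the options dictionary when opening the decoder. Below is a minimal sketch of selecting page 2 of a multi-page TIFF through the public libavcodec API; the function name decode_tiff_page2 and the buf/size parameters are illustrative only, and the buffer is assumed to end with the usual AV_INPUT_BUFFER_PADDING_SIZE bytes of zeroed padding.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>

/* Sketch: decode page 2 of a multi-page TIFF already loaded into buf/size.
 * On success *out receives the decoded frame (caller frees it). */
static int decode_tiff_page2(const uint8_t *buf, int size, AVFrame **out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_TIFF);
    AVCodecContext *ctx  = NULL;
    AVDictionary *opts   = NULL;
    AVPacket *pkt        = NULL;
    AVFrame *frame       = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    ctx   = avcodec_alloc_context3(codec);
    pkt   = av_packet_alloc();
    frame = av_frame_alloc();
    if (!ctx || !pkt || !frame) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* "page" is 1-indexed, matching the option declared above */
    av_dict_set(&opts, "page", "2", 0);
    if ((ret = avcodec_open2(ctx, codec, &opts)) < 0)
        goto end;

    pkt->data = (uint8_t *)buf;   /* not reference counted; fine for one-shot use */
    pkt->size = size;
    if ((ret = avcodec_send_packet(ctx, pkt)) < 0 ||
        (ret = avcodec_receive_frame(ctx, frame)) < 0)
        goto end;

    *out  = frame;                /* hand the decoded frame to the caller */
    frame = NULL;
    ret   = 0;
end:
    av_dict_free(&opts);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}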