FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
56  AVCodecContext *avctx;
57  GetByteContext gb;
58
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62
63  int get_subimage;
64  uint16_t get_page;
65  int get_thumbnail;
66
67  enum TiffType tiff_type;
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
71  int palette_is_set;
72  int le;
73  enum TiffCompr compr;
74  enum TiffPhotometric photometric;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
79  int fill_order;
80  uint32_t res[4];
81  int is_thumbnail;
82  unsigned last_tag;
83
84  int is_bayer;
85  uint8_t pattern[4];
86  unsigned black_level;
87  unsigned white_level;
88  uint16_t dng_lut[65536];
89
90  uint32_t sub_ifd;
91  uint16_t cur_page;
92
93  int strips, rps, sstype;
94  int sot;
95  int stripsizesoff, stripsize, stripoff, strippos;
96  LZWState *lzw;
97
98  /* Tile support */
99  int is_tiled;
100  int tile_byte_counts_offset, tile_offsets_offset;
101  int tile_width, tile_length;
102  int tile_count;
103
104  int is_jpeg;
105
106  uint8_t *deinvert_buf;
107  int deinvert_buf_size;
108  uint8_t *yuv_line;
109  unsigned int yuv_line_size;
110  uint8_t *fax_buffer;
111  unsigned int fax_buffer_size;
112
113  int geotag_count;
114  TiffGeoTag *geotags;
115 } TiffContext;
116
117 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
118  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
119  s->tiff_type = tiff_type;
120 }
121 
122 static void free_geotags(TiffContext *const s)
123 {
124  int i;
125  for (i = 0; i < s->geotag_count; i++) {
126  if (s->geotags[i].val)
127  av_freep(&s->geotags[i].val);
128  }
129  av_freep(&s->geotags);
130  s->geotag_count = 0;
131 }
132 
133 #define RET_GEOKEY(TYPE, array, element)\
134  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
135  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
136  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
137 
138 static const char *get_geokey_name(int key)
139 {
140  RET_GEOKEY(VERT, vert, name);
141  RET_GEOKEY(PROJ, proj, name);
142  RET_GEOKEY(GEOG, geog, name);
143  RET_GEOKEY(CONF, conf, name);
144 
145  return NULL;
146 }
147 
148 static int get_geokey_type(int key)
149 {
150  RET_GEOKEY(VERT, vert, type);
151  RET_GEOKEY(PROJ, proj, type);
152  RET_GEOKEY(GEOG, geog, type);
153  RET_GEOKEY(CONF, conf, type);
154 
155  return AVERROR_INVALIDDATA;
156 }
157 
158 static int cmp_id_key(const void *id, const void *k)
159 {
160  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
161 }
162 
163 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
164 {
165  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
166  if(r)
167  return r->name;
168 
169  return NULL;
170 }
171 
172 static char *get_geokey_val(int key, int val)
173 {
174  char *ap;
175 
176  if (val == TIFF_GEO_KEY_UNDEFINED)
177  return av_strdup("undefined");
178  if (val == TIFF_GEO_KEY_USER_DEFINED)
179  return av_strdup("User-Defined");
180 
181 #define RET_GEOKEY_VAL(TYPE, array)\
182  if (val >= TIFF_##TYPE##_OFFSET &&\
183  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
184  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
185 
186  switch (key) {
187  case TIFF_GT_MODEL_TYPE_GEOKEY:
188  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
189  break;
190  case TIFF_GT_RASTER_TYPE_GEOKEY:
191  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
192  break;
193  case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
194  case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
195  case TIFF_VERTICAL_UNITS_GEOKEY:
196  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
197  break;
198  case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
199  case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
200  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
201  break;
202  case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
203  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
204  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
205  break;
206  case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
207  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
208  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
209  break;
210  case TIFF_GEOG_ELLIPSOID_GEOKEY:
211  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
212  break;
213  case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
214  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
215  break;
216  case TIFF_PROJECTED_CS_TYPE_GEOKEY:
217  ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
218  if(ap) return ap;
219  break;
220  case TIFF_PROJECTION_GEOKEY:
221  ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
222  if(ap) return ap;
223  break;
224  case TIFF_PROJ_COORD_TRANS_GEOKEY:
225  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
226  break;
227  case TIFF_VERTICAL_CS_TYPE_GEOKEY:
228  RET_GEOKEY_VAL(VERT_CS, vert_cs);
229  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
230  break;
231
232  }
233 
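 /* Fallback for values not covered above: return a heap-allocated "Unknown-<val>" string.
  * 14 bytes covers "Unknown-" plus a short decimal value and the NUL; snprintf() truncates longer values. */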
234  ap = av_malloc(14);
235  if (ap)
236  snprintf(ap, 14, "Unknown-%d", val);
237  return ap;
238 }
239 
240 static char *doubles2str(double *dp, int count, const char *sep)
241 {
242  int i;
243  char *ap, *ap0;
244  uint64_t component_len;
245  if (!sep) sep = ", ";
246  component_len = 24LL + strlen(sep);
247  if (count >= (INT_MAX - 1)/component_len)
248  return NULL;
249  ap = av_malloc(component_len * count + 1);
250  if (!ap)
251  return NULL;
252  ap0 = ap;
253  ap[0] = '\0';
254  for (i = 0; i < count; i++) {
255  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
256  if(l >= component_len) {
257  av_free(ap0);
258  return NULL;
259  }
260  ap += l;
261  }
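 // Remove the trailing separator appended by the last loop iteration.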
262  ap0[strlen(ap0) - strlen(sep)] = '\0';
263  return ap0;
264 }
265 
266 static int add_metadata(int count, int type,
267  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
268 {
269  switch(type) {
270  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
271  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
272  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
273  default : return AVERROR_INVALIDDATA;
274  };
275 }
276 
277 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
278  const uint8_t *src, int src_stride, int width, int height,
279  int is_single_comp, int is_u16);
280 
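/* Expand one row of packed samples: 1/2/4 bpp become one byte per sample, 10/12/14 bpp become
 * one uint16_t per sample; other depths are copied as-is (or memset when the constant byte 'c'
 * is used instead of a source pointer). */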
281 static void av_always_inline horizontal_fill(TiffContext *s,
282  unsigned int bpp, uint8_t* dst,
283  int usePtr, const uint8_t *src,
284  uint8_t c, int width, int offset)
285 {
286  switch (bpp) {
287  case 1:
288  while (--width >= 0) {
289  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
290  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
291  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
292  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
293  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
294  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
295  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
296  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
297  }
298  break;
299  case 2:
300  while (--width >= 0) {
301  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
302  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
303  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
304  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
305  }
306  break;
307  case 4:
308  while (--width >= 0) {
309  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
310  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
311  }
312  break;
313  case 10:
314  case 12:
315  case 14: {
316  uint16_t *dst16 = (uint16_t *)dst;
317  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
318  uint8_t shift = is_dng ? 0 : 16 - bpp;
319  GetBitContext gb;
320
321  init_get_bits8(&gb, src, width);
322  for (int i = 0; i < s->width; i++) {
323  dst16[i] = get_bits(&gb, bpp) << shift;
324  }
325  }
326  break;
327  default:
328  if (usePtr) {
329  memcpy(dst + offset, src, width);
330  } else {
331  memset(dst + offset, c, width);
332  }
333  }
334 }
335 
336 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
337 {
338  int i;
339 
340  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
341  if (!s->deinvert_buf)
342  return AVERROR(ENOMEM);
343  for (i = 0; i < size; i++)
344  s->deinvert_buf[i] = ff_reverse[src[i]];
345 
346  return 0;
347 }
348 
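/* Expand sub-16-bit grayscale samples (e.g. 12 bpp) into the 16-bit luma plane without rescaling. */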
349 static void unpack_gray(TiffContext *s, AVFrame *p,
350  const uint8_t *src, int lnum, int width, int bpp)
351 {
352  GetBitContext gb;
353  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
354 
355  init_get_bits8(&gb, src, width);
356 
357  for (int i = 0; i < s->width; i++) {
358  dst[i] = get_bits(&gb, bpp);
359  }
360 }
361 
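/* Re-interleave one row of TIFF YCbCr data (a block of luma samples followed by one Cb and one
 * Cr per subsampling unit) into the planar picture, clamping at the right/bottom edges when the
 * image size is not a multiple of the subsampling factors. */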
362 static void unpack_yuv(TiffContext *s, AVFrame *p,
363  const uint8_t *src, int lnum)
364 {
365  int i, j, k;
366  int w = (s->width - 1) / s->subsampling[0] + 1;
367  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
368  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
369  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
370  for (i = 0; i < w; i++) {
371  for (j = 0; j < s->subsampling[1]; j++)
372  for (k = 0; k < s->subsampling[0]; k++)
373  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
374  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
375  *pu++ = *src++;
376  *pv++ = *src++;
377  }
378  }else{
379  for (i = 0; i < w; i++) {
380  for (j = 0; j < s->subsampling[1]; j++)
381  for (k = 0; k < s->subsampling[0]; k++)
382  p->data[0][(lnum + j) * p->linesize[0] +
383  i * s->subsampling[0] + k] = *src++;
384  *pu++ = *src++;
385  *pv++ = *src++;
386  }
387  }
388 }
389 
390 #if CONFIG_ZLIB
391 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
392  int size)
393 {
394  z_stream zstream = { 0 };
395  int zret;
396 
397  zstream.next_in = src;
398  zstream.avail_in = size;
399  zstream.next_out = dst;
400  zstream.avail_out = *len;
401  zret = inflateInit(&zstream);
402  if (zret != Z_OK) {
403  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
404  return zret;
405  }
406  zret = inflate(&zstream, Z_SYNC_FLUSH);
407  inflateEnd(&zstream);
408  *len = zstream.total_out;
409  return zret == Z_STREAM_END ? Z_OK : zret;
410 }
411 
412 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
413  const uint8_t *src, int size, int width, int lines,
414  int strip_start, int is_yuv)
415 {
416  uint8_t *zbuf;
417  unsigned long outlen;
418  int ret, line;
419  outlen = width * lines;
420  zbuf = av_malloc(outlen);
421  if (!zbuf)
422  return AVERROR(ENOMEM);
423  if (s->fill_order) {
424  if ((ret = deinvert_buffer(s, src, size)) < 0) {
425  av_free(zbuf);
426  return ret;
427  }
428  src = s->deinvert_buf;
429  }
430  ret = tiff_uncompress(zbuf, &outlen, src, size);
431  if (ret != Z_OK) {
432  av_log(s->avctx, AV_LOG_ERROR,
433  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
434  (unsigned long)width * lines, ret);
435  av_free(zbuf);
436  return AVERROR_UNKNOWN;
437  }
438  src = zbuf;
439  for (line = 0; line < lines; line++) {
440  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
441  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
442  } else {
443  memcpy(dst, src, width);
444  }
445  if (is_yuv) {
446  unpack_yuv(s, p, dst, strip_start + line);
447  line += s->subsampling[1] - 1;
448  }
449  dst += stride;
450  src += width;
451  }
452  av_free(zbuf);
453  return 0;
454 }
455 #endif
456 
457 #if CONFIG_LZMA
458 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
459  int size)
460 {
461  lzma_stream stream = LZMA_STREAM_INIT;
462  lzma_ret ret;
463 
464  stream.next_in = (uint8_t *)src;
465  stream.avail_in = size;
466  stream.next_out = dst;
467  stream.avail_out = *len;
468  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
469  if (ret != LZMA_OK) {
470  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
471  return ret;
472  }
473  ret = lzma_code(&stream, LZMA_RUN);
474  lzma_end(&stream);
475  *len = stream.total_out;
476  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
477 }
478 
479 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
480  const uint8_t *src, int size, int width, int lines,
481  int strip_start, int is_yuv)
482 {
483  uint64_t outlen = width * (uint64_t)lines;
484  int ret, line;
485  uint8_t *buf = av_malloc(outlen);
486  if (!buf)
487  return AVERROR(ENOMEM);
488  if (s->fill_order) {
489  if ((ret = deinvert_buffer(s, src, size)) < 0) {
490  av_free(buf);
491  return ret;
492  }
493  src = s->deinvert_buf;
494  }
495  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
496  if (ret != LZMA_OK) {
497  av_log(s->avctx, AV_LOG_ERROR,
498  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
499  (uint64_t)width * lines, ret);
500  av_free(buf);
501  return AVERROR_UNKNOWN;
502  }
503  src = buf;
504  for (line = 0; line < lines; line++) {
505  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
506  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
507  } else {
508  memcpy(dst, src, width);
509  }
510  if (is_yuv) {
511  unpack_yuv(s, p, dst, strip_start + line);
512  line += s->subsampling[1] - 1;
513  }
514  dst += stride;
515  src += width;
516  }
517  av_free(buf);
518  return 0;
519 }
520 #endif
521 
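/* CCITT G3/G4/RLE path: the input is copied into a padded scratch buffer (bit-reversed when
 * FillOrder is 2) before being handed to ff_ccitt_unpack(). */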
522 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
523  const uint8_t *src, int size, int width, int lines)
524 {
525  int i, ret = 0;
526  int line;
527  uint8_t *src2;
528 
529  av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
530  src2 = s->fax_buffer;
531
532  if (!src2) {
533  av_log(s->avctx, AV_LOG_ERROR,
534  "Error allocating temporary buffer\n");
535  return AVERROR(ENOMEM);
536  }
537 
538  if (!s->fill_order) {
539  memcpy(src2, src, size);
540  } else {
541  for (i = 0; i < size; i++)
542  src2[i] = ff_reverse[src[i]];
543  }
544  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
545  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
546  s->compr, s->fax_opts);
547  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
548  for (line = 0; line < lines; line++) {
549  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
550  dst += stride;
551  }
552  return ret;
553 }
554 
555 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
556
557 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
558  const uint8_t *src, int size, int strip_start, int lines)
559 {
560  PutByteContext pb;
561  int c, line, pixels, code, ret;
562  const uint8_t *ssrc = src;
563  int width = ((s->width * s->bpp) + 7) >> 3;
564  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
565  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
566  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
567  desc->nb_components >= 3;
568  int is_dng;
569 
570  if (s->planar)
571  width /= s->bppcount;
572 
573  if (size <= 0)
574  return AVERROR_INVALIDDATA;
575 
576  if (is_yuv) {
577  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
578  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
579  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
580  if (s->yuv_line == NULL) {
581  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
582  return AVERROR(ENOMEM);
583  }
584  dst = s->yuv_line;
585  stride = 0;
586 
587  width = (s->width - 1) / s->subsampling[0] + 1;
588  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
589  av_assert0(width <= bytes_per_row);
590  av_assert0(s->bpp == 24);
591  }
592  if (s->is_bayer) {
593  av_assert0(width == (s->bpp * s->width + 7) >> 3);
594  }
595  if (p->format == AV_PIX_FMT_GRAY12) {
596  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
597  if (s->yuv_line == NULL) {
598  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
599  return AVERROR(ENOMEM);
600  }
601  dst = s->yuv_line;
602  stride = 0;
603  }
604 
605  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
606 #if CONFIG_ZLIB
607  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
608  strip_start, is_yuv);
609 #else
610  av_log(s->avctx, AV_LOG_ERROR,
611  "zlib support not enabled, "
612  "deflate compression not supported\n");
613  return AVERROR(ENOSYS);
614 #endif
615  }
616  if (s->compr == TIFF_LZMA) {
617 #if CONFIG_LZMA
618  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
619  strip_start, is_yuv);
620 #else
621  av_log(s->avctx, AV_LOG_ERROR,
622  "LZMA support not enabled\n");
623  return AVERROR(ENOSYS);
624 #endif
625  }
626  if (s->compr == TIFF_LZW) {
627  if (s->fill_order) {
628  if ((ret = deinvert_buffer(s, src, size)) < 0)
629  return ret;
630  ssrc = src = s->deinvert_buf;
631  }
632  if (size > 1 && !src[0] && (src[1]&1)) {
633  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
634  }
635  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
636  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
637  return ret;
638  }
639  for (line = 0; line < lines; line++) {
640  pixels = ff_lzw_decode(s->lzw, dst, width);
641  if (pixels < width) {
642  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
643  pixels, width);
644  return AVERROR_INVALIDDATA;
645  }
646  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
647  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
648  if (is_yuv) {
649  unpack_yuv(s, p, dst, strip_start + line);
650  line += s->subsampling[1] - 1;
651  } else if (p->format == AV_PIX_FMT_GRAY12) {
652  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
653  }
654  dst += stride;
655  }
656  return 0;
657  }
658  if (s->compr == TIFF_CCITT_RLE ||
659  s->compr == TIFF_G3 ||
660  s->compr == TIFF_G4) {
661  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
662  return AVERROR_INVALIDDATA;
663 
664  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
665  }
666 
667  bytestream2_init(&s->gb, src, size);
668  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
669 
670  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
671 
672  /* Decode JPEG-encoded DNGs with strips */
673  if (s->compr == TIFF_NEWJPEG && is_dng) {
674  if (s->strips > 1) {
675  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
676  return AVERROR_PATCHWELCOME;
677  }
678  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
679  return ret;
680  return 0;
681  }
682 
683  if (is_dng && stride == 0)
684  return AVERROR_INVALIDDATA;
685 
686  for (line = 0; line < lines; line++) {
687  if (src - ssrc > size) {
688  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
689  return AVERROR_INVALIDDATA;
690  }
691 
692  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
693  break;
694  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
695  switch (s->compr) {
696  case TIFF_RAW:
697  if (ssrc + size - src < width)
698  return AVERROR_INVALIDDATA;
699 
700  if (!s->fill_order) {
701  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
702  dst, 1, src, 0, width, 0);
703  } else {
704  int i;
705  for (i = 0; i < width; i++)
706  dst[i] = ff_reverse[src[i]];
707  }
708 
709  /* Color processing for DNG images with uncompressed strips (non-tiled) */
710  if (is_dng) {
711  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
712 
713  is_u16 = (s->bpp / s->bppcount > 8);
714  pixel_size_bits = (is_u16 ? 16 : 8);
715  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
716 
717  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
718  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
719  dng_blit(s,
720  dst,
721  0, // no stride, only 1 line
722  dst,
723  0, // no stride, only 1 line
724  elements,
725  1,
726  0, // single-component variation is only present in JPEG-encoded DNGs
727  is_u16);
728  }
729 
730  src += width;
731  break;
732  case TIFF_PACKBITS:
733  for (pixels = 0; pixels < width;) {
734  if (ssrc + size - src < 2) {
735  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
736  return AVERROR_INVALIDDATA;
737  }
738  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
739  if (code >= 0) {
740  code++;
741  if (pixels + code > width ||
742  ssrc + size - src < code) {
743  av_log(s->avctx, AV_LOG_ERROR,
744  "Copy went out of bounds\n");
745  return AVERROR_INVALIDDATA;
746  }
747  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
748  dst, 1, src, 0, code, pixels);
749  src += code;
750  pixels += code;
751  } else if (code != -128) { // -127..-1
752  code = (-code) + 1;
753  if (pixels + code > width) {
754  av_log(s->avctx, AV_LOG_ERROR,
755  "Run went out of bounds\n");
756  return AVERROR_INVALIDDATA;
757  }
758  c = *src++;
759  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
760  dst, 0, NULL, c, code, pixels);
761  pixels += code;
762  }
763  }
764  if (s->fill_order) {
765  int i;
766  for (i = 0; i < width; i++)
767  dst[i] = ff_reverse[dst[i]];
768  }
769  break;
770  }
771  if (is_yuv) {
772  unpack_yuv(s, p, dst, strip_start + line);
773  line += s->subsampling[1] - 1;
774  } else if (p->format == AV_PIX_FMT_GRAY12) {
775  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
776  }
777  dst += stride;
778  }
779  return 0;
780 }
781 
782 /**
783  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
784  */
785 static uint16_t av_always_inline dng_process_color16(uint16_t value,
786  const uint16_t *lut,
787  uint16_t black_level,
788  float scale_factor) {
789  float value_norm;
790 
791  // Lookup table lookup
792  if (lut)
793  value = lut[value];
794 
795  // Black level subtraction
796  value = av_clip_uint16_c((unsigned)value - black_level);
797 
798  // Color scaling
799  value_norm = (float)value * scale_factor;
800 
801  value = av_clip_uint16_c(value_norm * 65535);
802 
803  return value;
804 }
805 
806 static uint16_t av_always_inline dng_process_color8(uint16_t value,
807  const uint16_t *lut,
808  uint16_t black_level,
809  float scale_factor) {
810  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
811 }
812 
813 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
814  const uint8_t *src, int src_stride,
815  int width, int height, int is_single_comp, int is_u16)
816 {
817  int line, col;
818  float scale_factor;
819 
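 /* Scale factor that maps (value - black_level) onto the 0..1 range of the sensor's usable levels. */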
820  scale_factor = 1.0f / (s->white_level - s->black_level);
821 
822  if (is_single_comp) {
823  if (!is_u16)
824  return; /* <= 8bpp unsupported */
825 
826  /* Image is double the width and half the height we need, each row comprises 2 rows of the output
827  (split vertically in the middle). */
828  for (line = 0; line < height / 2; line++) {
829  uint16_t *dst_u16 = (uint16_t *)dst;
830  uint16_t *src_u16 = (uint16_t *)src;
831 
832  /* Blit first half of input row to initial row of output */
833  for (col = 0; col < width; col++)
834  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
835 
836  /* Advance the destination pointer by a row (source pointer remains in the same place) */
837  dst += dst_stride * sizeof(uint16_t);
838  dst_u16 = (uint16_t *)dst;
839 
840  /* Blit second half of input row to next row of output */
841  for (col = 0; col < width; col++)
842  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
843 
844  dst += dst_stride * sizeof(uint16_t);
845  src += src_stride * sizeof(uint16_t);
846  }
847  } else {
848  /* Input and output image are the same size and the MJpeg decoder has done per-component
849  deinterleaving, so blitting here is straightforward. */
850  if (is_u16) {
851  for (line = 0; line < height; line++) {
852  uint16_t *dst_u16 = (uint16_t *)dst;
853  uint16_t *src_u16 = (uint16_t *)src;
854 
855  for (col = 0; col < width; col++)
856  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
857 
858  dst += dst_stride * sizeof(uint16_t);
859  src += src_stride * sizeof(uint16_t);
860  }
861  } else {
862  for (line = 0; line < height; line++) {
863  uint8_t *dst_u8 = dst;
864  const uint8_t *src_u8 = src;
865 
866  for (col = 0; col < width; col++)
867  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut, s->black_level, scale_factor);
868 
869  dst += dst_stride;
870  src += src_stride;
871  }
872  }
873  }
874 }
875 
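/* Decode one JPEG-compressed DNG tile/strip with the wrapped MJPEG decoder and blit the result
 * into the destination frame at (dst_x, dst_y), applying the DNG color processing above. */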
876 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
877  int tile_byte_count, int dst_x, int dst_y, int w, int h)
878 {
879  TiffContext *s = avctx->priv_data;
880  AVPacket jpkt;
881  uint8_t *dst_data, *src_data;
882  uint32_t dst_offset; /* offset from dst buffer in pixels */
883  int is_single_comp, is_u16, pixel_size;
884  int ret;
885 
886  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
887  return AVERROR_INVALIDDATA;
888 
889  /* Prepare a packet and send to the MJPEG decoder */
890  av_init_packet(&jpkt);
891  jpkt.data = (uint8_t*)s->gb.buffer;
892  jpkt.size = tile_byte_count;
893 
894  if (s->is_bayer) {
895  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
896  /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
897  image or not from its own data (and we need that information when decoding it). */
898  mjpegdecctx->bayer = 1;
899  }
900 
901  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
902  if (ret < 0) {
903  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
904  return ret;
905  }
906 
907  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
908  if (ret < 0) {
909  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
910 
911  /* Normally skip, error if explode */
912  if (avctx->err_recognition & AV_EF_EXPLODE)
913  return AVERROR_INVALIDDATA;
914  else
915  return 0;
916  }
917 
918  is_u16 = (s->bpp > 8);
919 
920  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
921 
922  if (s->jpgframe->width != s->avctx_mjpeg->width ||
923  s->jpgframe->height != s->avctx_mjpeg->height ||
924  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
925  return AVERROR_INVALIDDATA;
926 
927  /* See dng_blit for explanation */
928  if (s->avctx_mjpeg->width == w * 2 &&
929  s->avctx_mjpeg->height == h / 2 &&
930  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
931  is_single_comp = 1;
932  } else if (s->avctx_mjpeg->width >= w &&
933  s->avctx_mjpeg->height >= h &&
934  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
935  ) {
936  is_single_comp = 0;
937  } else
938  return AVERROR_INVALIDDATA;
939 
940  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
941 
942  if (is_single_comp && !is_u16) {
943  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
944  av_frame_unref(s->jpgframe);
945  return AVERROR_PATCHWELCOME;
946  }
947 
948  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
949  dst_data = frame->data[0] + dst_offset * pixel_size;
950  src_data = s->jpgframe->data[0];
951 
952  dng_blit(s,
953  dst_data,
954  frame->linesize[0] / pixel_size,
955  src_data,
956  s->jpgframe->linesize[0] / pixel_size,
957  w,
958  h,
959  is_single_comp,
960  is_u16);
961 
962  av_frame_unref(s->jpgframe);
963
964  return 0;
965 }
966 
967 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
968 {
969  TiffContext *s = avctx->priv_data;
970  int tile_idx;
971  int tile_offset_offset, tile_offset;
972  int tile_byte_count_offset, tile_byte_count;
973  int tile_count_x, tile_count_y;
974  int tile_width, tile_length;
975  int has_width_leftover, has_height_leftover;
976  int tile_x = 0, tile_y = 0;
977  int pos_x = 0, pos_y = 0;
978  int ret;
979 
980  s->jpgframe->width = s->tile_width;
981  s->jpgframe->height = s->tile_length;
982 
983  s->avctx_mjpeg->width = s->tile_width;
984  s->avctx_mjpeg->height = s->tile_length;
985 
986  has_width_leftover = (s->width % s->tile_width != 0);
987  has_height_leftover = (s->height % s->tile_length != 0);
988 
989  /* Calculate tile counts (round up) */
990  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
991  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
992 
993  /* Iterate over the number of tiles */
994  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
995  tile_x = tile_idx % tile_count_x;
996  tile_y = tile_idx / tile_count_x;
997 
998  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
999  tile_width = s->width % s->tile_width;
1000  else
1001  tile_width = s->tile_width;
1002 
1003  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1004  tile_length = s->height % s->tile_length;
1005  else
1006  tile_length = s->tile_length;
1007 
1008  /* Read tile offset */
1009  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1010  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1011  tile_offset = ff_tget_long(&s->gb, s->le);
1012 
1013  /* Read tile byte size */
1014  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1015  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1016  tile_byte_count = ff_tget_long(&s->gb, s->le);
1017 
1018  /* Seek to tile data */
1019  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1020 
1021  /* Decode JPEG tile and copy it in the reference frame */
1022  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1023 
1024  if (ret < 0)
1025  return ret;
1026 
1027  /* Advance current positions */
1028  pos_x += tile_width;
1029  if (tile_x == tile_count_x - 1) { // If on the right edge
1030  pos_x = 0;
1031  pos_y += tile_length;
1032  }
1033  }
1034 
1035  /* Frame is ready to be output */
1036  frame->pict_type = AV_PICTURE_TYPE_I;
1037  frame->key_frame = 1;
1038 
1039  return avpkt->size;
1040 }
1041 
1042 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1043 {
1044  TiffContext *s = avctx->priv_data;
1045 
1046  s->jpgframe->width = s->width;
1047  s->jpgframe->height = s->height;
1048 
1049  s->avctx_mjpeg->width = s->width;
1050  s->avctx_mjpeg->height = s->height;
1051 
1052  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1053 }
1054 
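/* Pick the output pixel format: the switch below keys on
 * planar * 1000 + bpp * 10 + bppcount (+ 10000 for Bayer/CFA data). */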
1055 static int init_image(TiffContext *s, ThreadFrame *frame)
1056 {
1057  int ret;
1058  int create_gray_palette = 0;
1059 
1060  // make sure there is no aliasing in the following switch
1061  if (s->bpp >= 100 || s->bppcount >= 10) {
1062  av_log(s->avctx, AV_LOG_ERROR,
1063  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1064  s->bpp, s->bppcount);
1065  return AVERROR_INVALIDDATA;
1066  }
1067 
1068  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1069  case 11:
1070  if (!s->palette_is_set) {
1072  break;
1073  }
1074  case 21:
1075  case 41:
1077  if (!s->palette_is_set) {
1078  create_gray_palette = 1;
1079  }
1080  break;
1081  case 81:
1083  break;
1084  case 121:
1086  break;
1087  case 10081:
1088  switch (AV_RL32(s->pattern)) {
1089  case 0x02010100:
1091  break;
1092  case 0x00010102:
1094  break;
1095  case 0x01000201:
1097  break;
1098  case 0x01020001:
1100  break;
1101  default:
1102  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1103  AV_RL32(s->pattern));
1104  return AVERROR_PATCHWELCOME;
1105  }
1106  break;
1107  case 10101:
1108  case 10121:
1109  case 10141:
1110  case 10161:
1111  switch (AV_RL32(s->pattern)) {
1112  case 0x02010100:
1114  break;
1115  case 0x00010102:
1117  break;
1118  case 0x01000201:
1120  break;
1121  case 0x01020001:
1123  break;
1124  default:
1125  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1126  AV_RL32(s->pattern));
1127  return AVERROR_PATCHWELCOME;
1128  }
1129  break;
1130  case 243:
1131  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1132  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1134  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1136  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1138  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1140  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1142  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1144  } else {
1145  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1146  return AVERROR_PATCHWELCOME;
1147  }
1148  } else
1150  break;
1151  case 161:
1153  break;
1154  case 162:
1156  break;
1157  case 322:
1159  break;
1160  case 324:
1162  break;
1163  case 405:
1166  else {
1168  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1169  return AVERROR_PATCHWELCOME;
1170  }
1171  break;
1172  case 483:
1174  break;
1175  case 644:
1177  break;
1178  case 1243:
1180  break;
1181  case 1324:
1183  break;
1184  case 1483:
1186  break;
1187  case 1644:
1189  break;
1190  default:
1191  av_log(s->avctx, AV_LOG_ERROR,
1192  "This format is not supported (bpp=%d, bppcount=%d)\n",
1193  s->bpp, s->bppcount);
1194  return AVERROR_INVALIDDATA;
1195  }
1196 
1197  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1199  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1200  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1201  desc->nb_components < 3) {
1202  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1203  return AVERROR_INVALIDDATA;
1204  }
1205  }
1206 
1207  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1208  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1209  if (ret < 0)
1210  return ret;
1211  }
1212  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1213  return ret;
1214  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1215  if (!create_gray_palette)
1216  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1217  else {
1218  /* make default grayscale pal */
1219  int i;
1220  uint32_t *pal = (uint32_t *)frame->f->data[1];
1221  for (i = 0; i < 1<<s->bpp; i++)
1222  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1223  }
1224  }
1225  return 0;
1226 }
1227 
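/* XRes/YRes rationals are stored as {num, den} pairs in res[]; once all four values are known
 * the sample aspect ratio is derived as YRes/XRes. */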
1228 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1229 {
1230  int offset = tag == TIFF_YRES ? 2 : 0;
1231  s->res[offset++] = num;
1232  s->res[offset] = den;
1233  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1234  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1235  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1236  if (num > INT64_MAX || den > INT64_MAX) {
1237  num = num >> 1;
1238  den = den >> 1;
1239  }
1240  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1241  num, den, INT32_MAX);
1242  if (!s->avctx->sample_aspect_ratio.den)
1243  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1244  }
1245 }
1246 
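/* Parse a single IFD entry: small values are read inline from the entry's value field,
 * larger arrays are addressed through the stored offset. */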
1247 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1248 {
1249  AVFrameSideData *sd;
1250  GetByteContext gb_temp;
1251  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1252  int i, start;
1253  int pos;
1254  int ret;
1255  double *dp;
1256 
1257  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1258  if (ret < 0) {
1259  goto end;
1260  }
1261  if (tag <= s->last_tag)
1262  return AVERROR_INVALIDDATA;
1263 
1264  // Ignore TIFF_STRIP_SIZE for the ordering check: some files store it out of order relative to TIFF_STRIP_OFFS
1265  if (tag != TIFF_STRIP_SIZE)
1266  s->last_tag = tag;
1267 
1268  off = bytestream2_tell(&s->gb);
1269  if (count == 1) {
1270  switch (type) {
1271  case TIFF_BYTE:
1272  case TIFF_SHORT:
1273  case TIFF_LONG:
1274  value = ff_tget(&s->gb, type, s->le);
1275  break;
1276  case TIFF_RATIONAL:
1277  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1278  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1279  if (!value2) {
1280  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1281  return AVERROR_INVALIDDATA;
1282  }
1283 
1284  break;
1285  case TIFF_STRING:
1286  if (count <= 4) {
1287  break;
1288  }
1289  default:
1290  value = UINT_MAX;
1291  }
1292  }
1293 
1294  switch (tag) {
1295  case TIFF_SUBFILE:
1296  s->is_thumbnail = (value != 0);
1297  break;
1298  case TIFF_WIDTH:
1299  s->width = value;
1300  break;
1301  case TIFF_HEIGHT:
1302  s->height = value;
1303  break;
1304  case TIFF_BPP:
1305  if (count > 5 || count <= 0) {
1306  av_log(s->avctx, AV_LOG_ERROR,
1307  "This format is not supported (bpp=%d, %d components)\n",
1308  value, count);
1309  return AVERROR_INVALIDDATA;
1310  }
1311  s->bppcount = count;
1312  if (count == 1)
1313  s->bpp = value;
1314  else {
1315  switch (type) {
1316  case TIFF_BYTE:
1317  case TIFF_SHORT:
1318  case TIFF_LONG:
1319  s->bpp = 0;
1320  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1321  return AVERROR_INVALIDDATA;
1322  for (i = 0; i < count; i++)
1323  s->bpp += ff_tget(&s->gb, type, s->le);
1324  break;
1325  default:
1326  s->bpp = -1;
1327  }
1328  }
1329  break;
1330  case TIFF_SAMPLES_PER_PIXEL:
1331  if (count != 1) {
1332  av_log(s->avctx, AV_LOG_ERROR,
1333  "Samples per pixel requires a single value, many provided\n");
1334  return AVERROR_INVALIDDATA;
1335  }
1336  if (value > 5 || value <= 0) {
1337  av_log(s->avctx, AV_LOG_ERROR,
1338  "Invalid samples per pixel %d\n", value);
1339  return AVERROR_INVALIDDATA;
1340  }
1341  if (s->bppcount == 1)
1342  s->bpp *= value;
1343  s->bppcount = value;
1344  break;
1345  case TIFF_COMPR:
1346  s->compr = value;
1347  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1348  s->predictor = 0;
1349  switch (s->compr) {
1350  case TIFF_RAW:
1351  case TIFF_PACKBITS:
1352  case TIFF_LZW:
1353  case TIFF_CCITT_RLE:
1354  break;
1355  case TIFF_G3:
1356  case TIFF_G4:
1357  s->fax_opts = 0;
1358  break;
1359  case TIFF_DEFLATE:
1360  case TIFF_ADOBE_DEFLATE:
1361 #if CONFIG_ZLIB
1362  break;
1363 #else
1364  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1365  return AVERROR(ENOSYS);
1366 #endif
1367  case TIFF_JPEG:
1368  case TIFF_NEWJPEG:
1369  s->is_jpeg = 1;
1370  break;
1371  case TIFF_LZMA:
1372 #if CONFIG_LZMA
1373  break;
1374 #else
1375  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1376  return AVERROR(ENOSYS);
1377 #endif
1378  default:
1379  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1380  s->compr);
1381  return AVERROR_INVALIDDATA;
1382  }
1383  break;
1384  case TIFF_ROWSPERSTRIP:
1385  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1386  value = s->height;
1387  s->rps = FFMIN(value, s->height);
1388  break;
1389  case TIFF_STRIP_OFFS:
1390  if (count == 1) {
1391  if (value > INT_MAX) {
1392  av_log(s->avctx, AV_LOG_ERROR,
1393  "strippos %u too large\n", value);
1394  return AVERROR_INVALIDDATA;
1395  }
1396  s->strippos = 0;
1397  s->stripoff = value;
1398  } else
1399  s->strippos = off;
1400  s->strips = count;
1401  if (s->strips == 1)
1402  s->rps = s->height;
1403  s->sot = type;
1404  break;
1405  case TIFF_STRIP_SIZE:
1406  if (count == 1) {
1407  if (value > INT_MAX) {
1408  av_log(s->avctx, AV_LOG_ERROR,
1409  "stripsize %u too large\n", value);
1410  return AVERROR_INVALIDDATA;
1411  }
1412  s->stripsizesoff = 0;
1413  s->stripsize = value;
1414  s->strips = 1;
1415  } else {
1416  s->stripsizesoff = off;
1417  }
1418  s->strips = count;
1419  s->sstype = type;
1420  break;
1421  case TIFF_XRES:
1422  case TIFF_YRES:
1423  set_sar(s, tag, value, value2);
1424  break;
1425  case TIFF_TILE_OFFSETS:
1426  s->tile_offsets_offset = off;
1427  s->tile_count = count;
1428  s->is_tiled = 1;
1429  break;
1430  case TIFF_TILE_BYTE_COUNTS:
1431  s->tile_byte_counts_offset = off;
1432  break;
1433  case TIFF_TILE_LENGTH:
1434  s->tile_length = value;
1435  break;
1436  case TIFF_TILE_WIDTH:
1437  s->tile_width = value;
1438  break;
1439  case TIFF_PREDICTOR:
1440  s->predictor = value;
1441  break;
1442  case TIFF_SUB_IFDS:
1443  if (count == 1)
1444  s->sub_ifd = value;
1445  else if (count > 1)
1446  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1447  break;
1448  case DNG_LINEARIZATION_TABLE:
1449  if (count > FF_ARRAY_ELEMS(s->dng_lut))
1450  return AVERROR_INVALIDDATA;
1451  for (int i = 0; i < count; i++)
1452  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1453  break;
1454  case DNG_BLACK_LEVEL:
1455  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1456  if (type == TIFF_RATIONAL) {
1457  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1458  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1459  if (!value2) {
1460  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1461  return AVERROR_INVALIDDATA;
1462  }
1463 
1464  s->black_level = value / value2;
1465  } else
1466  s->black_level = ff_tget(&s->gb, type, s->le);
1467  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1468  } else {
1469  s->black_level = value / value2;
1470  }
1471  break;
1472  case DNG_WHITE_LEVEL:
1473  s->white_level = value;
1474  break;
1475  case TIFF_CFA_PATTERN_DIM:
1476  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1477  ff_tget(&s->gb, type, s->le) != 2)) {
1478  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1479  return AVERROR_INVALIDDATA;
1480  }
1481  break;
1482  case TIFF_CFA_PATTERN:
1483  s->is_bayer = 1;
1484  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1485  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1486  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1487  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1488  break;
1489  case TIFF_PHOTOMETRIC:
1490  switch (value) {
1491  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1492  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1493  case TIFF_PHOTOMETRIC_RGB:
1494  case TIFF_PHOTOMETRIC_PALETTE:
1495  case TIFF_PHOTOMETRIC_SEPARATED:
1496  case TIFF_PHOTOMETRIC_YCBCR:
1497  case TIFF_PHOTOMETRIC_CFA:
1498  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1499  s->photometric = value;
1500  break;
1501  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1502  case TIFF_PHOTOMETRIC_CIE_LAB:
1503  case TIFF_PHOTOMETRIC_ICC_LAB:
1504  case TIFF_PHOTOMETRIC_ITU_LAB:
1505  case TIFF_PHOTOMETRIC_LOG_L:
1506  case TIFF_PHOTOMETRIC_LOG_LUV:
1507  avpriv_report_missing_feature(s->avctx,
1508  "PhotometricInterpretation 0x%04X",
1509  value);
1510  return AVERROR_PATCHWELCOME;
1511  default:
1512  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1513  "unknown\n", value);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  break;
1517  case TIFF_FILL_ORDER:
1518  if (value < 1 || value > 2) {
1519  av_log(s->avctx, AV_LOG_WARNING,
1520  "Unknown FillOrder value %d, trying default one\n", value);
1521  value = 1;
1522  }
1523  s->fill_order = value - 1;
1524  break;
1525  case TIFF_PAL: {
1526  GetByteContext pal_gb[3];
1527  off = type_sizes[type];
1528  if (count / 3 > 256 ||
1529  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1530  return AVERROR_INVALIDDATA;
1531 
1532  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1533  bytestream2_skip(&pal_gb[1], count / 3 * off);
1534  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1535 
1536  off = (type_sizes[type] - 1) << 3;
1537  if (off > 31U) {
1538  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1539  return AVERROR_INVALIDDATA;
1540  }
1541 
1542  for (i = 0; i < count / 3; i++) {
1543  uint32_t p = 0xFF000000;
1544  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1545  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1546  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1547  s->palette[i] = p;
1548  }
1549  s->palette_is_set = 1;
1550  break;
1551  }
1552  case TIFF_PLANAR:
1553  s->planar = value == 2;
1554  break;
1556  if (count != 2) {
1557  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1558  return AVERROR_INVALIDDATA;
1559  }
1560  for (i = 0; i < count; i++) {
1561  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1562  if (s->subsampling[i] <= 0) {
1563  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1564  s->subsampling[i] = 1;
1565  return AVERROR_INVALIDDATA;
1566  }
1567  }
1568  break;
1569  case TIFF_T4OPTIONS:
1570  if (s->compr == TIFF_G3)
1571  s->fax_opts = value;
1572  break;
1573  case TIFF_T6OPTIONS:
1574  if (s->compr == TIFF_G4)
1575  s->fax_opts = value;
1576  break;
1577 #define ADD_METADATA(count, name, sep)\
1578  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1579  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1580  goto end;\
1581  }
1583  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1584  break;
1586  ADD_METADATA(count, "ModelTransformationTag", NULL);
1587  break;
1588  case TIFF_MODEL_TIEPOINT:
1589  ADD_METADATA(count, "ModelTiepointTag", NULL);
1590  break;
1592  if (s->geotag_count) {
1593  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1594  return AVERROR_INVALIDDATA;
1595  }
1596  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1597  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1598  s->geotag_count = ff_tget_short(&s->gb, s->le);
1599  if (s->geotag_count > count / 4 - 1) {
1600  s->geotag_count = count / 4 - 1;
1601  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1602  }
1603  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1604  || s->geotag_count == 0) {
1605  s->geotag_count = 0;
1606  return -1;
1607  }
1608  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1609  if (!s->geotags) {
1610  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1611  s->geotag_count = 0;
1612  goto end;
1613  }
1614  for (i = 0; i < s->geotag_count; i++) {
1615  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1616  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1617  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1618 
1619  if (!s->geotags[i].type)
1620  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1621  else
1622  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1623  }
1624  break;
1626  if (count >= INT_MAX / sizeof(int64_t))
1627  return AVERROR_INVALIDDATA;
1628  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1629  return AVERROR_INVALIDDATA;
1630  dp = av_malloc_array(count, sizeof(double));
1631  if (!dp) {
1632  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1633  goto end;
1634  }
1635  for (i = 0; i < count; i++)
1636  dp[i] = ff_tget_double(&s->gb, s->le);
1637  for (i = 0; i < s->geotag_count; i++) {
1638  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1639  if (s->geotags[i].count == 0
1640  || s->geotags[i].offset + s->geotags[i].count > count) {
1641  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1642  } else if (s->geotags[i].val) {
1643  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1644  } else {
1645  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1646  if (!ap) {
1647  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1648  av_freep(&dp);
1649  return AVERROR(ENOMEM);
1650  }
1651  s->geotags[i].val = ap;
1652  }
1653  }
1654  }
1655  av_freep(&dp);
1656  break;
1657  case TIFF_GEO_ASCII_PARAMS:
1658  pos = bytestream2_tell(&s->gb);
1659  for (i = 0; i < s->geotag_count; i++) {
1660  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1661  if (s->geotags[i].count == 0
1662  || s->geotags[i].offset + s->geotags[i].count > count) {
1663  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1664  } else {
1665  char *ap;
1666 
1667  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1668  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1669  return AVERROR_INVALIDDATA;
1670  if (s->geotags[i].val)
1671  return AVERROR_INVALIDDATA;
1672  ap = av_malloc(s->geotags[i].count);
1673  if (!ap) {
1674  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1675  return AVERROR(ENOMEM);
1676  }
1677  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1678  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1679  s->geotags[i].val = ap;
1680  }
1681  }
1682  }
1683  break;
1684  case TIFF_ICC_PROFILE:
1685  gb_temp = s->gb;
1686  bytestream2_seek(&gb_temp, SEEK_SET, off);
1687 
1688  if (bytestream2_get_bytes_left(&gb_temp) < count)
1689  return AVERROR_INVALIDDATA;
1690 
1691  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
1692  if (!sd)
1693  return AVERROR(ENOMEM);
1694 
1695  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1696  break;
1697  case TIFF_ARTIST:
1698  ADD_METADATA(count, "artist", NULL);
1699  break;
1700  case TIFF_COPYRIGHT:
1701  ADD_METADATA(count, "copyright", NULL);
1702  break;
1703  case TIFF_DATE:
1704  ADD_METADATA(count, "date", NULL);
1705  break;
1706  case TIFF_DOCUMENT_NAME:
1707  ADD_METADATA(count, "document_name", NULL);
1708  break;
1709  case TIFF_HOST_COMPUTER:
1710  ADD_METADATA(count, "computer", NULL);
1711  break;
1713  ADD_METADATA(count, "description", NULL);
1714  break;
1715  case TIFF_MAKE:
1716  ADD_METADATA(count, "make", NULL);
1717  break;
1718  case TIFF_MODEL:
1719  ADD_METADATA(count, "model", NULL);
1720  break;
1721  case TIFF_PAGE_NAME:
1722  ADD_METADATA(count, "page_name", NULL);
1723  break;
1724  case TIFF_PAGE_NUMBER:
1725  ADD_METADATA(count, "page_number", " / ");
1726  // need to seek back to re-read the page number
1727  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1728  // read the page number
1729  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1730  // get back to where we were before the previous seek
1731  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1732  break;
1733  case TIFF_SOFTWARE_NAME:
1734  ADD_METADATA(count, "software", NULL);
1735  break;
1736  case DNG_VERSION:
1737  if (count == 4) {
1738  unsigned int ver[4];
1739  ver[0] = ff_tget(&s->gb, type, s->le);
1740  ver[1] = ff_tget(&s->gb, type, s->le);
1741  ver[2] = ff_tget(&s->gb, type, s->le);
1742  ver[3] = ff_tget(&s->gb, type, s->le);
1743 
1744  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1745  ver[0], ver[1], ver[2], ver[3]);
1746 
1747  tiff_set_type(s, TIFF_TYPE_DNG);
1748  }
1749  break;
1750  case CINEMADNG_TIME_CODES:
1751  case CINEMADNG_FRAME_RATE:
1752  case CINEMADNG_T_STOP:
1753  case CINEMADNG_REEL_NAME:
1754  case CINEMADNG_CAMERA_LABEL:
1755  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1756  break;
1757  default:
1758  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1759  av_log(s->avctx, AV_LOG_ERROR,
1760  "Unknown or unsupported tag %d/0x%0X\n",
1761  tag, tag);
1762  return AVERROR_INVALIDDATA;
1763  }
1764  }
1765 end:
1766  if (s->bpp > 64U) {
1767  av_log(s->avctx, AV_LOG_ERROR,
1768  "This format is not supported (bpp=%d, %d components)\n",
1769  s->bpp, count);
1770  s->bpp = 0;
1771  return AVERROR_INVALIDDATA;
1772  }
1773  bytestream2_seek(&s->gb, start, SEEK_SET);
1774  return 0;
1775 }
1776 
1777 static int decode_frame(AVCodecContext *avctx,
1778  void *data, int *got_frame, AVPacket *avpkt)
1779 {
1780  TiffContext *const s = avctx->priv_data;
1781  AVFrame *const p = data;
1782  ThreadFrame frame = { .f = data };
1783  unsigned off, last_off;
1784  int le, ret, plane, planes;
1785  int i, j, entries, stride;
1786  unsigned soff, ssize;
1787  uint8_t *dst;
1788  GetByteContext stripsizes;
1789  GetByteContext stripdata;
1790  int retry_for_subifd, retry_for_page;
1791  int is_dng;
1792  int has_tile_bits, has_strip_bits;
1793 
1794  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1795 
1796  // parse image header
1797  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1798  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1799  return ret;
1800  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1801  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1802  return AVERROR_INVALIDDATA;
1803  }
1804  s->le = le;
1805  // TIFF_BPP is not a required tag and defaults to 1
1806 
1808 again:
1809  s->is_thumbnail = 0;
1810  s->bppcount = s->bpp = 1;
1811  s->photometric = TIFF_PHOTOMETRIC_NONE;
1812  s->compr = TIFF_RAW;
1813  s->fill_order = 0;
1814  s->white_level = 0;
1815  s->is_bayer = 0;
1816  s->is_tiled = 0;
1817  s->is_jpeg = 0;
1818  s->cur_page = 0;
1819  s->last_tag = 0;
1820 
1821  for (i = 0; i < 65536; i++)
1822  s->dng_lut[i] = i;
1823 
1824  free_geotags(s);
1825 
1826  // Reset these offsets so we can tell if they were set this frame
1827  s->stripsizesoff = s->strippos = 0;
1828  /* parse image file directory */
1829  bytestream2_seek(&s->gb, off, SEEK_SET);
1830  entries = ff_tget_short(&s->gb, le);
1831  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1832  return AVERROR_INVALIDDATA;
1833  for (i = 0; i < entries; i++) {
1834  if ((ret = tiff_decode_tag(s, p)) < 0)
1835  return ret;
1836  }
1837 
1838  if (s->get_thumbnail && !s->is_thumbnail) {
1839  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1840  return AVERROR_EOF;
1841  }
1842 
1843  /** whether we should process this IFD's SubIFD */
1844  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1845  /** whether we should process this multi-page IFD's next page */
1846  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1847 
1848  last_off = off;
1849  if (retry_for_page) {
1850  // set offset to the next IFD
1851  off = ff_tget_long(&s->gb, le);
1852  } else if (retry_for_subifd) {
1853  // set offset to the SubIFD
1854  off = s->sub_ifd;
1855  }
1856 
1857  if (retry_for_subifd || retry_for_page) {
1858  if (!off) {
1859  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1860  return AVERROR_INVALIDDATA;
1861  }
1862  if (off <= last_off) {
1863  avpriv_request_sample(s->avctx, "non increasing IFD offset");
1864  return AVERROR_INVALIDDATA;
1865  }
1866  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1867  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1868  return AVERROR_INVALIDDATA;
1869  }
1870  s->sub_ifd = 0;
1871  goto again;
1872  }
1873 
1874  /* At this point we've decided on which (Sub)IFD to process */
1875 
1876  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1877 
1878  for (i = 0; i<s->geotag_count; i++) {
1879  const char *keyname = get_geokey_name(s->geotags[i].key);
1880  if (!keyname) {
1881  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1882  continue;
1883  }
1884  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1885  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1886  continue;
1887  }
1888  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1889  if (ret<0) {
1890  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1891  return ret;
1892  }
1893  }
1894 
1895  if (is_dng) {
1896  int bps;
1897 
1898  if (s->bpp % s->bppcount)
1899  return AVERROR_INVALIDDATA;
1900  bps = s->bpp / s->bppcount;
1901  if (bps < 8 || bps > 32)
1902  return AVERROR_INVALIDDATA;
1903 
1904  if (s->white_level == 0)
1905  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
1906 
1907  if (s->white_level <= s->black_level) {
1908  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1909  s->black_level, s->white_level);
1910  return AVERROR_INVALIDDATA;
1911  }
1912 
1913  if (s->planar)
1914  return AVERROR_PATCHWELCOME;
1915  }
1916 
1917  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1918  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1919  return AVERROR_INVALIDDATA;
1920  }
1921 
1922  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
1923  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
1924 
1925  if (has_tile_bits && has_strip_bits) {
1926  av_log(avctx, AV_LOG_WARNING, "Tiled TIFF is not allowed to strip\n");
1927  }
1928 
1929  /* now we have the data and may start decoding */
1930  if ((ret = init_image(s, &frame)) < 0)
1931  return ret;
1932 
1933  if (!s->is_tiled) {
1934  if (s->strips == 1 && !s->stripsize) {
1935  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1936  s->stripsize = avpkt->size - s->stripoff;
1937  }
1938 
1939  if (s->stripsizesoff) {
1940  if (s->stripsizesoff >= (unsigned)avpkt->size)
1941  return AVERROR_INVALIDDATA;
1942  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1943  avpkt->size - s->stripsizesoff);
1944  }
1945  if (s->strippos) {
1946  if (s->strippos >= (unsigned)avpkt->size)
1947  return AVERROR_INVALIDDATA;
1948  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1949  avpkt->size - s->strippos);
1950  }
1951 
1952  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1953  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1954  return AVERROR_INVALIDDATA;
1955  }
1956  }
1957 
1961  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1963  }
1964 
1965  /* Handle DNG images with JPEG-compressed tiles */
1966 
1967  if (is_dng && s->is_tiled) {
1968  if (!s->is_jpeg) {
1969  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1970  return AVERROR_PATCHWELCOME;
1971  } else if (!s->is_bayer) {
1972  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1973  return AVERROR_PATCHWELCOME;
1974  } else {
1975  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1976  *got_frame = 1;
1977  return ret;
1978  }
1979  }
1980 
1981  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1982 
1983  planes = s->planar ? s->bppcount : 1;
1984  for (plane = 0; plane < planes; plane++) {
1985  uint8_t *five_planes = NULL;
1986  int remaining = avpkt->size;
1987  int decoded_height;
1988  stride = p->linesize[plane];
1989  dst = p->data[plane];
1990  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1991  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1992  stride = stride * 5 / 4;
1993  five_planes =
1994  dst = av_malloc(stride * s->height);
1995  if (!dst)
1996  return AVERROR(ENOMEM);
1997  }
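 /* Walk the image strip by strip: per-strip sizes and offsets come from the
  * StripByteCounts/StripOffsets readers set up above, or fall back to the
  * single-strip values. */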
1998  for (i = 0; i < s->height; i += s->rps) {
1999  if (i)
2000  dst += s->rps * stride;
2001  if (s->stripsizesoff)
2002  ssize = ff_tget(&stripsizes, s->sstype, le);
2003  else
2004  ssize = s->stripsize;
2005 
2006  if (s->strippos)
2007  soff = ff_tget(&stripdata, s->sot, le);
2008  else
2009  soff = s->stripoff;
2010 
2011  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2012  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2013  av_freep(&five_planes);
2014  return AVERROR_INVALIDDATA;
2015  }
2016  remaining -= ssize;
2017  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2018  FFMIN(s->rps, s->height - i))) < 0) {
2019  if (avctx->err_recognition & AV_EF_EXPLODE) {
2020  av_freep(&five_planes);
2021  return ret;
2022  }
2023  break;
2024  }
2025  }
2026  decoded_height = FFMIN(i, s->height);
2027 
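 /* TIFF predictor 2 (horizontal differencing): every stored sample holds the
  * difference to its left neighbour, so decoding accumulates left to right.
  * E.g. stored bytes 100, +5, -3 reconstruct to 100, 105, 102. The 16-bit
  * paths below do the same per component with the matching endianness. */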
2028  if (s->predictor == 2) {
2029  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2030  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2031  return AVERROR_PATCHWELCOME;
2032  }
2033  dst = five_planes ? five_planes : p->data[plane];
2034  soff = s->bpp >> 3;
2035  if (s->planar)
2036  soff = FFMAX(soff / s->bppcount, 1);
2037  ssize = s->width * soff;
2038  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2039  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2040  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2041  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2042  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2043  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2044  for (i = 0; i < decoded_height; i++) {
2045  for (j = soff; j < ssize; j += 2)
2046  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2047  dst += stride;
2048  }
2049  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2050  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2051  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2052  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2053  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2054  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2055  for (i = 0; i < decoded_height; i++) {
2056  for (j = soff; j < ssize; j += 2)
2057  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2058  dst += stride;
2059  }
2060  } else {
2061  for (i = 0; i < decoded_height; i++) {
2062  for (j = soff; j < ssize; j++)
2063  dst[j] += dst[j - soff];
2064  dst += stride;
2065  }
2066  }
2067  }
2068 
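 /* WhiteIsZero photometric interpretation: samples are stored inverted, so
  * flip every value around the maximum code (palette-depth aware for PAL8). */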
2069  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2070  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2071  dst = p->data[plane];
2072  for (i = 0; i < s->height; i++) {
2073  for (j = 0; j < stride; j++)
2074  dst[j] = c - dst[j];
2075  dst += stride;
2076  }
2077  }
2078 
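 /* "Separated" (CMYK) data packed into RGB0/RGBA: convert in place with
  * R = (255 - C) * (255 - K) / 255 (likewise G/B); "* 257 >> 16" approximates
  * the division by 255. The 16-bit branch below uses "* 65537 >> 32" for the
  * division by 65535. */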
2079  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2080  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2081  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2082  uint8_t *src = five_planes ? five_planes : p->data[plane];
2083  dst = p->data[plane];
2084  for (i = 0; i < s->height; i++) {
2085  for (j = 0; j < s->width; j++) {
2086  int k = 255 - src[x * j + 3];
2087  int r = (255 - src[x * j ]) * k;
2088  int g = (255 - src[x * j + 1]) * k;
2089  int b = (255 - src[x * j + 2]) * k;
2090  dst[4 * j ] = r * 257 >> 16;
2091  dst[4 * j + 1] = g * 257 >> 16;
2092  dst[4 * j + 2] = b * 257 >> 16;
2093  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2094  }
2095  src += stride;
2096  dst += p->linesize[plane];
2097  }
2098  av_freep(&five_planes);
2099  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2100  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2101  dst = p->data[plane];
2102  for (i = 0; i < s->height; i++) {
2103  for (j = 0; j < s->width; j++) {
2104  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2105  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2106  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2107  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2108  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2109  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2110  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2111  AV_WB16(dst + 8 * j + 6, 65535);
2112  }
2113  dst += p->linesize[plane];
2114  }
2115  }
2116  }
2117 
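 /* Planar TIFF stores the planes in R, G, B order while the GBRP pixel
  * formats expect G, B, R, so rotate the plane pointers instead of copying. */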
2118  if (s->planar && s->bppcount > 2) {
2119  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2120  FFSWAP(int, p->linesize[0], p->linesize[2]);
2121  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2122  FFSWAP(int, p->linesize[0], p->linesize[1]);
2123  }
2124 
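 /* Non-DNG 16-bit Bayer data is stretched from the sensor white level to the
  * full 16-bit range, e.g. with white_level = 4095 a raw value of 4095 maps
  * to 65535. */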
2125  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2126  uint16_t *dst = (uint16_t *)p->data[0];
2127  for (i = 0; i < s->height; i++) {
2128  for (j = 0; j < s->width; j++)
2129  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2130  dst += stride / 2;
2131  }
2132  }
2133 
2134  *got_frame = 1;
2135 
2136  return avpkt->size;
2137 }
2138 
2139 static av_cold int tiff_init(AVCodecContext *avctx)
2140 {
2141  TiffContext *s = avctx->priv_data;
2142  const AVCodec *codec;
2143  int ret;
2144 
2145  s->width = 0;
2146  s->height = 0;
2147  s->subsampling[0] =
2148  s->subsampling[1] = 1;
2149  s->avctx = avctx;
2150  ff_lzw_decode_open(&s->lzw);
2151  if (!s->lzw)
2152  return AVERROR(ENOMEM);
2153  ff_ccitt_unpack_init();
2154 
2155  /* Allocate JPEG frame */
2156  s->jpgframe = av_frame_alloc();
2157  if (!s->jpgframe)
2158  return AVERROR(ENOMEM);
2159 
2160  /* Prepare everything needed for JPEG decoding */
2161  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2162  if (!codec)
2163  return AVERROR_BUG;
2164  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2165  if (!s->avctx_mjpeg)
2166  return AVERROR(ENOMEM);
2167  s->avctx_mjpeg->flags = avctx->flags;
2168  s->avctx_mjpeg->flags2 = avctx->flags2;
2169  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2170  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
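 /* tiff_init itself runs with the codec lock held, so the nested MJPEG
  * context is opened through the recursive helper, which temporarily
  * releases that lock. */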
2171  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2172  if (ret < 0) {
2173  return ret;
2174  }
2175 
2176  return 0;
2177 }
2178 
2179 static av_cold int tiff_end(AVCodecContext *avctx)
2180 {
2181  TiffContext *const s = avctx->priv_data;
2182 
2183  free_geotags(s);
2184 
2185  ff_lzw_decode_close(&s->lzw);
2186  av_freep(&s->deinvert_buf);
2187  s->deinvert_buf_size = 0;
2188  av_freep(&s->yuv_line);
2189  s->yuv_line_size = 0;
2190  av_freep(&s->fax_buffer);
2191  s->fax_buffer_size = 0;
2192  av_frame_free(&s->jpgframe);
2193  avcodec_free_context(&s->avctx_mjpeg);
2194  return 0;
2195 }
2196 
2197 #define OFFSET(x) offsetof(TiffContext, x)
2198 static const AVOption tiff_options[] = {
2199  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2200  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2201  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2202  { NULL },
2203 };
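 /*
  * Minimal usage sketch for these private options: pick the second page of a
  * multi-page TIFF when opening the decoder. "dec_ctx" and "codec" stand for
  * an AVCodecContext/AVCodec pair prepared by the caller and are placeholders,
  * not part of this file.
  *
  *     AVDictionary *opts = NULL;
  *     av_dict_set(&opts, "page", "2", 0);
  *     ret = avcodec_open2(dec_ctx, codec, &opts);
  *     av_dict_free(&opts);
  */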
2204 
2205 static const AVClass tiff_decoder_class = {
2206  .class_name = "TIFF decoder",
2207  .item_name = av_default_item_name,
2208  .option = tiff_options,
2209  .version = LIBAVUTIL_VERSION_INT,
2210 };
2211 
2212 AVCodec ff_tiff_decoder = {
2213  .name = "tiff",
2214  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2215  .type = AVMEDIA_TYPE_VIDEO,
2216  .id = AV_CODEC_ID_TIFF,
2217  .priv_data_size = sizeof(TiffContext),
2218  .init = tiff_init,
2219  .close = tiff_end,
2220  .decode = decode_frame,
2221  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2222  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2223  .priv_class = &tiff_decoder_class,
2224 };