FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62 
64  uint16_t get_page;
66 
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
72  int le;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
80  uint32_t res[4];
82 
83  int is_bayer;
85  unsigned black_level;
86  unsigned white_level;
87  uint16_t dng_lut[65536];
88 
89  uint32_t sub_ifd;
90  uint16_t cur_page;
91 
92  int strips, rps, sstype;
93  int sot;
96 
97  /* Tile support */
98  int is_tiled;
102 
103  int is_jpeg;
104 
108  unsigned int yuv_line_size;
110  unsigned int fax_buffer_size;
111 
114 } TiffContext;
115 
116 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
117  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
118  s->tiff_type = tiff_type;
119 }
120 
121 static void free_geotags(TiffContext *const s)
122 {
123  int i;
124  for (i = 0; i < s->geotag_count; i++) {
125  if (s->geotags[i].val)
126  av_freep(&s->geotags[i].val);
127  }
128  av_freep(&s->geotags);
129  s->geotag_count = 0;
130 }
131 
132 #define RET_GEOKEY(TYPE, array, element)\
133  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
135  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
136 
137 static const char *get_geokey_name(int key)
138 {
139  RET_GEOKEY(VERT, vert, name);
140  RET_GEOKEY(PROJ, proj, name);
141  RET_GEOKEY(GEOG, geog, name);
142  RET_GEOKEY(CONF, conf, name);
143 
144  return NULL;
145 }
146 
147 static int get_geokey_type(int key)
148 {
149  RET_GEOKEY(VERT, vert, type);
150  RET_GEOKEY(PROJ, proj, type);
151  RET_GEOKEY(GEOG, geog, type);
152  RET_GEOKEY(CONF, conf, type);
153 
154  return AVERROR_INVALIDDATA;
155 }
156 
157 static int cmp_id_key(const void *id, const void *k)
158 {
159  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
160 }
161 
162 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
163 {
164  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
165  if(r)
166  return r->name;
167 
168  return NULL;
169 }
170 
171 static char *get_geokey_val(int key, int val)
172 {
173  char *ap;
174 
175  if (val == TIFF_GEO_KEY_UNDEFINED)
176  return av_strdup("undefined");
177  if (val == TIFF_GEO_KEY_USER_DEFINED)
178  return av_strdup("User-Defined");
179 
180 #define RET_GEOKEY_VAL(TYPE, array)\
181  if (val >= TIFF_##TYPE##_OFFSET &&\
182  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
183  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
184 
185  switch (key) {
187  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
188  break;
190  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
191  break;
195  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
196  break;
199  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
200  break;
202  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
203  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
204  break;
206  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
207  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
208  break;
210  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
211  break;
213  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
214  break;
217  if(ap) return ap;
218  break;
221  if(ap) return ap;
222  break;
224  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
225  break;
227  RET_GEOKEY_VAL(VERT_CS, vert_cs);
228  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
229  break;
230 
231  }
232 
233  ap = av_malloc(14);
234  if (ap)
235  snprintf(ap, 14, "Unknown-%d", val);
236  return ap;
237 }
238 
239 static char *doubles2str(double *dp, int count, const char *sep)
240 {
241  int i;
242  char *ap, *ap0;
243  uint64_t component_len;
244  if (!sep) sep = ", ";
245  component_len = 24LL + strlen(sep);
246  if (count >= (INT_MAX - 1)/component_len)
247  return NULL;
248  ap = av_malloc(component_len * count + 1);
249  if (!ap)
250  return NULL;
251  ap0 = ap;
252  ap[0] = '\0';
253  for (i = 0; i < count; i++) {
254  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255  if(l >= component_len) {
256  av_free(ap0);
257  return NULL;
258  }
259  ap += l;
260  }
261  ap0[strlen(ap0) - strlen(sep)] = '\0';
262  return ap0;
263 }
264 
265 static int add_metadata(int count, int type,
266  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
267 {
268  switch(type) {
269  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
270  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
271  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
272  default : return AVERROR_INVALIDDATA;
273  };
274 }
275 
276 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
277  const uint8_t *src, int src_stride, int width, int height,
278  int is_single_comp, int is_u16);
279 
281  unsigned int bpp, uint8_t* dst,
282  int usePtr, const uint8_t *src,
283  uint8_t c, int width, int offset)
284 {
285  switch (bpp) {
286  case 1:
287  while (--width >= 0) {
288  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
289  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
290  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
291  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
292  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
293  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
294  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
295  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
296  }
297  break;
298  case 2:
299  while (--width >= 0) {
300  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
301  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
302  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
303  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
304  }
305  break;
306  case 4:
307  while (--width >= 0) {
308  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
309  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
310  }
311  break;
312  case 10:
313  case 12:
314  case 14: {
315  uint16_t *dst16 = (uint16_t *)dst;
316  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
317  uint8_t shift = is_dng ? 0 : 16 - bpp;
318  GetBitContext gb;
319 
320  init_get_bits8(&gb, src, width);
321  for (int i = 0; i < s->width; i++) {
322  dst16[i] = get_bits(&gb, bpp) << shift;
323  }
324  }
325  break;
326  default:
327  if (usePtr) {
328  memcpy(dst + offset, src, width);
329  } else {
330  memset(dst + offset, c, width);
331  }
332  }
333 }
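/* Example of the sub-byte expansion above: for bpp == 1 each source byte yields
 * eight palette indices, most-significant bit first, so 0xA5 (binary 1010 0101)
 * becomes the destination bytes 1,0,1,0,0,1,0,1; for bpp == 4 the byte 0xA5
 * becomes the two nibbles 0xA and 0x5. The 10/12/14-bit case instead reads the
 * whole row through a bit reader into 16-bit samples, left-shifted to fill
 * 16 bits unless the file is a DNG. */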
334 
335 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
336 {
337  int i;
338 
339  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
340  if (!s->deinvert_buf)
341  return AVERROR(ENOMEM);
342  for (i = 0; i < size; i++)
343  s->deinvert_buf[i] = ff_reverse[src[i]];
344 
345  return 0;
346 }
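/* FillOrder == 2 (s->fill_order set) means bits within each byte are stored
 * least-significant bit first; ff_reverse[] is a 256-entry bit-reversal table,
 * e.g. ff_reverse[0x01] == 0x80 and ff_reverse[0x0F] == 0xF0, so the buffer is
 * rewritten with every byte bit-reversed before normal decoding proceeds. */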
347 
348 static void unpack_gray(TiffContext *s, AVFrame *p,
349  const uint8_t *src, int lnum, int width, int bpp)
350 {
352  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
353 
354  init_get_bits8(&gb, src, width);
355 
356  for (int i = 0; i < s->width; i++) {
357  dst[i] = get_bits(&gb, bpp);
358  }
359 }
360 
361 static void unpack_yuv(TiffContext *s, AVFrame *p,
362  const uint8_t *src, int lnum)
363 {
364  int i, j, k;
365  int w = (s->width - 1) / s->subsampling[0] + 1;
366  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
367  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
368  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
369  for (i = 0; i < w; i++) {
370  for (j = 0; j < s->subsampling[1]; j++)
371  for (k = 0; k < s->subsampling[0]; k++)
372  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
373  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
374  *pu++ = *src++;
375  *pv++ = *src++;
376  }
377  }else{
378  for (i = 0; i < w; i++) {
379  for (j = 0; j < s->subsampling[1]; j++)
380  for (k = 0; k < s->subsampling[0]; k++)
381  p->data[0][(lnum + j) * p->linesize[0] +
382  i * s->subsampling[0] + k] = *src++;
383  *pu++ = *src++;
384  *pv++ = *src++;
385  }
386  }
387 }
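/* The packed YCbCr strip layout handled above stores one "data unit" per chroma
 * sample: subsampling[0] x subsampling[1] luma values followed by one Cb and one
 * Cr byte. For 2x2 subsampling a unit is therefore [Y00 Y01 Y10 Y11 Cb Cr]; the
 * first branch clamps to the last row/column when the image size is not a
 * multiple of the subsampling factors. */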
388 
389 #if CONFIG_ZLIB
390 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
391  int size)
392 {
393  z_stream zstream = { 0 };
394  int zret;
395 
396  zstream.next_in = src;
397  zstream.avail_in = size;
398  zstream.next_out = dst;
399  zstream.avail_out = *len;
400  zret = inflateInit(&zstream);
401  if (zret != Z_OK) {
402  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
403  return zret;
404  }
405  zret = inflate(&zstream, Z_SYNC_FLUSH);
406  inflateEnd(&zstream);
407  *len = zstream.total_out;
408  return zret == Z_STREAM_END ? Z_OK : zret;
409 }
410 
411 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
412  const uint8_t *src, int size, int width, int lines,
413  int strip_start, int is_yuv)
414 {
415  uint8_t *zbuf;
416  unsigned long outlen;
417  int ret, line;
418  outlen = width * lines;
419  zbuf = av_malloc(outlen);
420  if (!zbuf)
421  return AVERROR(ENOMEM);
422  if (s->fill_order) {
423  if ((ret = deinvert_buffer(s, src, size)) < 0) {
424  av_free(zbuf);
425  return ret;
426  }
427  src = s->deinvert_buf;
428  }
429  ret = tiff_uncompress(zbuf, &outlen, src, size);
430  if (ret != Z_OK) {
432  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
433  (unsigned long)width * lines, ret);
434  av_free(zbuf);
435  return AVERROR_UNKNOWN;
436  }
437  src = zbuf;
438  for (line = 0; line < lines; line++) {
439  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
440  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
441  } else {
442  memcpy(dst, src, width);
443  }
444  if (is_yuv) {
445  unpack_yuv(s, p, dst, strip_start + line);
446  line += s->subsampling[1] - 1;
447  }
448  dst += stride;
449  src += width;
450  }
451  av_free(zbuf);
452  return 0;
453 }
454 #endif
455 
456 #if CONFIG_LZMA
457 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
458  int size)
459 {
460  lzma_stream stream = LZMA_STREAM_INIT;
461  lzma_ret ret;
462 
463  stream.next_in = (uint8_t *)src;
464  stream.avail_in = size;
465  stream.next_out = dst;
466  stream.avail_out = *len;
467  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
468  if (ret != LZMA_OK) {
469  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
470  return ret;
471  }
472  ret = lzma_code(&stream, LZMA_RUN);
473  lzma_end(&stream);
474  *len = stream.total_out;
475  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
476 }
477 
478 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
479  const uint8_t *src, int size, int width, int lines,
480  int strip_start, int is_yuv)
481 {
482  uint64_t outlen = width * (uint64_t)lines;
483  int ret, line;
484  uint8_t *buf = av_malloc(outlen);
485  if (!buf)
486  return AVERROR(ENOMEM);
487  if (s->fill_order) {
488  if ((ret = deinvert_buffer(s, src, size)) < 0) {
489  av_free(buf);
490  return ret;
491  }
492  src = s->deinvert_buf;
493  }
494  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
495  if (ret != LZMA_OK) {
497  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
498  (uint64_t)width * lines, ret);
499  av_free(buf);
500  return AVERROR_UNKNOWN;
501  }
502  src = buf;
503  for (line = 0; line < lines; line++) {
504  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
505  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
506  } else {
507  memcpy(dst, src, width);
508  }
509  if (is_yuv) {
510  unpack_yuv(s, p, dst, strip_start + line);
511  line += s->subsampling[1] - 1;
512  }
513  dst += stride;
514  src += width;
515  }
516  av_free(buf);
517  return 0;
518 }
519 #endif
520 
521 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
522  const uint8_t *src, int size, int width, int lines)
523 {
524  int i, ret = 0;
525  int line;
526  uint8_t *src2;
527 
528  av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
529  src2 = s->fax_buffer;
530 
531  if (!src2) {
533  "Error allocating temporary buffer\n");
534  return AVERROR(ENOMEM);
535  }
536 
537  if (!s->fill_order) {
538  memcpy(src2, src, size);
539  } else {
540  for (i = 0; i < size; i++)
541  src2[i] = ff_reverse[src[i]];
542  }
543  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
544  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
545  s->compr, s->fax_opts);
546  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
547  for (line = 0; line < lines; line++) {
548  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
549  dst += stride;
550  }
551  return ret;
552 }
553 
553 
554 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
555 
556 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
557  const uint8_t *src, int size, int strip_start, int lines)
558 {
559  PutByteContext pb;
560  int c, line, pixels, code, ret;
561  const uint8_t *ssrc = src;
562  int width = ((s->width * s->bpp) + 7) >> 3;
563  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
564  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
565  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
566  desc->nb_components >= 3;
567  int is_dng;
568 
569  if (s->planar)
570  width /= s->bppcount;
571 
572  if (size <= 0)
573  return AVERROR_INVALIDDATA;
574 
575  if (is_yuv) {
576  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
577  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
578  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
579  if (s->yuv_line == NULL) {
580  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
581  return AVERROR(ENOMEM);
582  }
583  dst = s->yuv_line;
584  stride = 0;
585 
586  width = (s->width - 1) / s->subsampling[0] + 1;
587  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
588  av_assert0(width <= bytes_per_row);
589  av_assert0(s->bpp == 24);
590  }
591  if (s->is_bayer) {
592  av_assert0(width == (s->bpp * s->width + 7) >> 3);
593  }
594  if (p->format == AV_PIX_FMT_GRAY12) {
595  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
596  if (s->yuv_line == NULL) {
597  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
598  return AVERROR(ENOMEM);
599  }
600  dst = s->yuv_line;
601  stride = 0;
602  }
603 
604  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
605 #if CONFIG_ZLIB
606  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
607  strip_start, is_yuv);
608 #else
610  "zlib support not enabled, "
611  "deflate compression not supported\n");
612  return AVERROR(ENOSYS);
613 #endif
614  }
615  if (s->compr == TIFF_LZMA) {
616 #if CONFIG_LZMA
617  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
618  strip_start, is_yuv);
619 #else
621  "LZMA support not enabled\n");
622  return AVERROR(ENOSYS);
623 #endif
624  }
625  if (s->compr == TIFF_LZW) {
626  if (s->fill_order) {
627  if ((ret = deinvert_buffer(s, src, size)) < 0)
628  return ret;
629  ssrc = src = s->deinvert_buf;
630  }
631  if (size > 1 && !src[0] && (src[1]&1)) {
632  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
633  }
634  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
635  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
636  return ret;
637  }
638  for (line = 0; line < lines; line++) {
639  pixels = ff_lzw_decode(s->lzw, dst, width);
640  if (pixels < width) {
641  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
642  pixels, width);
643  return AVERROR_INVALIDDATA;
644  }
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
647  if (is_yuv) {
648  unpack_yuv(s, p, dst, strip_start + line);
649  line += s->subsampling[1] - 1;
650  } else if (p->format == AV_PIX_FMT_GRAY12) {
651  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
652  }
653  dst += stride;
654  }
655  return 0;
656  }
657  if (s->compr == TIFF_CCITT_RLE ||
658  s->compr == TIFF_G3 ||
659  s->compr == TIFF_G4) {
660  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
661  return AVERROR_INVALIDDATA;
662 
663  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
664  }
665 
666  bytestream2_init(&s->gb, src, size);
667  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
668 
669  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
670 
671  /* Decode JPEG-encoded DNGs with strips */
672  if (s->compr == TIFF_NEWJPEG && is_dng) {
673  if (s->strips > 1) {
674  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strip is unsupported\n");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
678  return ret;
679  return 0;
680  }
681 
682  if (is_dng && stride == 0)
683  return AVERROR_INVALIDDATA;
684 
685  for (line = 0; line < lines; line++) {
686  if (src - ssrc > size) {
687  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
688  return AVERROR_INVALIDDATA;
689  }
690 
691  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
692  break;
693  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
694  switch (s->compr) {
695  case TIFF_RAW:
696  if (ssrc + size - src < width)
697  return AVERROR_INVALIDDATA;
698 
699  if (!s->fill_order) {
700  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
701  dst, 1, src, 0, width, 0);
702  } else {
703  int i;
704  for (i = 0; i < width; i++)
705  dst[i] = ff_reverse[src[i]];
706  }
707 
708  /* Color processing for DNG images with uncompressed strips (non-tiled) */
709  if (is_dng) {
710  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
711 
712  is_u16 = (s->bpp > 8);
713  pixel_size_bits = (is_u16 ? 16 : 8);
714  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
715 
716  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
717  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
718  dng_blit(s,
719  dst,
720  0, // no stride, only 1 line
721  dst,
722  0, // no stride, only 1 line
723  elements,
724  1,
725  0, // single-component variation is only present in JPEG-encoded DNGs
726  is_u16);
727  }
728 
729  src += width;
730  break;
731  case TIFF_PACKBITS:
732  for (pixels = 0; pixels < width;) {
733  if (ssrc + size - src < 2) {
734  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
735  return AVERROR_INVALIDDATA;
736  }
737  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
738  if (code >= 0) {
739  code++;
740  if (pixels + code > width ||
741  ssrc + size - src < code) {
743  "Copy went out of bounds\n");
744  return AVERROR_INVALIDDATA;
745  }
747  dst, 1, src, 0, code, pixels);
748  src += code;
749  pixels += code;
750  } else if (code != -128) { // -127..-1
751  code = (-code) + 1;
752  if (pixels + code > width) {
754  "Run went out of bounds\n");
755  return AVERROR_INVALIDDATA;
756  }
757  c = *src++;
759  dst, 0, NULL, c, code, pixels);
760  pixels += code;
761  }
762  }
763  if (s->fill_order) {
764  int i;
765  for (i = 0; i < width; i++)
766  dst[i] = ff_reverse[dst[i]];
767  }
768  break;
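 /* PackBits summary: a code byte n in 0..127 copies the next n+1 literal bytes,
  * a code byte in -127..-1 repeats the following byte 1-n times, and -128 is
  * treated as a no-op. For example, the input sequence 03 AA BB CC DD FE 55
  * expands to AA BB CC DD 55 55 55. */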
769  }
770  if (is_yuv) {
771  unpack_yuv(s, p, dst, strip_start + line);
772  line += s->subsampling[1] - 1;
773  } else if (p->format == AV_PIX_FMT_GRAY12) {
774  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
775  }
776  dst += stride;
777  }
778  return 0;
779 }
780 
781 /**
782  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
783  */
784 static uint16_t av_always_inline dng_process_color16(uint16_t value,
785  const uint16_t *lut,
786  uint16_t black_level,
787  float scale_factor) {
788  float value_norm;
789 
790  // Lookup table lookup
791  if (lut)
792  value = lut[value];
793 
794  // Black level subtraction
795  value = av_clip_uint16_c((unsigned)value - black_level);
796 
797  // Color scaling
798  value_norm = (float)value * scale_factor;
799 
800  value = av_clip_uint16_c(value_norm * 65535);
801 
802  return value;
803 }
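/* Worked example of the mapping above, with illustrative values: assume no LUT,
 * black_level = 256 and white_level = 4095, so scale_factor = 1.0f / (4095 - 256).
 * A stored sample of 2048 becomes (2048 - 256) * scale_factor, roughly 0.467,
 * which rescaled to the full 16-bit range gives roughly 0.467 * 65535 = 30594.
 * Samples at or below the black level clip to 0 and samples at the white level
 * map to 65535. */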
804 
805 static uint16_t av_always_inline dng_process_color8(uint16_t value,
806  const uint16_t *lut,
807  uint16_t black_level,
808  float scale_factor) {
809  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
810 }
811 
812 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
813  const uint8_t *src, int src_stride,
814  int width, int height, int is_single_comp, int is_u16)
815 {
816  int line, col;
817  float scale_factor;
818 
819  scale_factor = 1.0f / (s->white_level - s->black_level);
820 
821  if (is_single_comp) {
822  if (!is_u16)
823  return; /* <= 8bpp unsupported */
824 
825  /* Image is double the width and half the height we need, each row comprises 2 rows of the output
826  (split vertically in the middle). */
827  for (line = 0; line < height / 2; line++) {
828  uint16_t *dst_u16 = (uint16_t *)dst;
829  uint16_t *src_u16 = (uint16_t *)src;
830 
831  /* Blit first half of input row to initial row of output */
832  for (col = 0; col < width; col++)
833  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
834 
835  /* Advance the destination pointer by a row (source pointer remains in the same place) */
836  dst += dst_stride * sizeof(uint16_t);
837  dst_u16 = (uint16_t *)dst;
838 
839  /* Blit second half of input row to next row of output */
840  for (col = 0; col < width; col++)
841  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
842 
843  dst += dst_stride * sizeof(uint16_t);
844  src += src_stride * sizeof(uint16_t);
845  }
846  } else {
847  /* Input and output image are the same size and the MJpeg decoder has done per-component
848  deinterleaving, so blitting here is straightforward. */
849  if (is_u16) {
850  for (line = 0; line < height; line++) {
851  uint16_t *dst_u16 = (uint16_t *)dst;
852  uint16_t *src_u16 = (uint16_t *)src;
853 
854  for (col = 0; col < width; col++)
855  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
856 
857  dst += dst_stride * sizeof(uint16_t);
858  src += src_stride * sizeof(uint16_t);
859  }
860  } else {
861  for (line = 0; line < height; line++) {
862  uint8_t *dst_u8 = dst;
863  const uint8_t *src_u8 = src;
864 
865  for (col = 0; col < width; col++)
866  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut, s->black_level, scale_factor);
867 
868  dst += dst_stride;
869  src += src_stride;
870  }
871  }
872  }
873 }
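/* Layout handled by the is_single_comp branch above: the decoded JPEG row is
 * twice the output width and the tile is half the output height, so each source
 * row carries two consecutive output rows back to back:
 *   src row:  | A0 .. A(w-1) | B0 .. B(w-1) |
 *   dst rows: | A0 .. A(w-1) |
 *             | B0 .. B(w-1) |
 * which is why the source pointer only advances once per pair of output rows. */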
874 
875 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
876  int tile_byte_count, int dst_x, int dst_y, int w, int h)
877 {
878  TiffContext *s = avctx->priv_data;
879  AVPacket jpkt;
880  uint8_t *dst_data, *src_data;
881  uint32_t dst_offset; /* offset from dst buffer in pixels */
882  int is_single_comp, is_u16, pixel_size;
883  int ret;
884 
885  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
886  return AVERROR_INVALIDDATA;
887 
888  /* Prepare a packet and send to the MJPEG decoder */
889  av_init_packet(&jpkt);
890  jpkt.data = (uint8_t*)s->gb.buffer;
891  jpkt.size = tile_byte_count;
892 
893  if (s->is_bayer) {
894  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
895  /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
896  image or not from its own data (and we need that information when decoding it). */
897  mjpegdecctx->bayer = 1;
898  }
899 
900  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
901  if (ret < 0) {
902  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
903  return ret;
904  }
905 
906  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
907  if (ret < 0) {
908  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
909 
910  /* Normally skip, error if explode */
911  if (avctx->err_recognition & AV_EF_EXPLODE)
912  return AVERROR_INVALIDDATA;
913  else
914  return 0;
915  }
916 
917  is_u16 = (s->bpp > 8);
918 
919  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
920 
921  /* See dng_blit for explanation */
922  if (s->avctx_mjpeg->width == w * 2 &&
923  s->avctx_mjpeg->height == h / 2 &&
925  is_single_comp = 1;
926  } else if (s->avctx_mjpeg->width == w &&
927  s->avctx_mjpeg->height == h &&
929  ) {
930  is_single_comp = 0;
931  } else
932  return AVERROR_INVALIDDATA;
933 
934  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
935 
936  if (is_single_comp && !is_u16) {
937  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
939  return AVERROR_PATCHWELCOME;
940  }
941 
942  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
943  dst_data = frame->data[0] + dst_offset * pixel_size;
944  src_data = s->jpgframe->data[0];
945 
946  dng_blit(s,
947  dst_data,
948  frame->linesize[0] / pixel_size,
949  src_data,
950  s->jpgframe->linesize[0] / pixel_size,
951  w,
952  h,
953  is_single_comp,
954  is_u16);
955 
956  av_frame_unref(s->jpgframe);
957 
958  return 0;
959 }
960 
961 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
962 {
963  TiffContext *s = avctx->priv_data;
964  int tile_idx;
965  int tile_offset_offset, tile_offset;
966  int tile_byte_count_offset, tile_byte_count;
967  int tile_count_x, tile_count_y;
968  int tile_width, tile_length;
969  int has_width_leftover, has_height_leftover;
970  int tile_x = 0, tile_y = 0;
971  int pos_x = 0, pos_y = 0;
972  int ret;
973 
974  s->jpgframe->width = s->tile_width;
975  s->jpgframe->height = s->tile_length;
976 
977  s->avctx_mjpeg->width = s->tile_width;
978  s->avctx_mjpeg->height = s->tile_length;
979 
980  has_width_leftover = (s->width % s->tile_width != 0);
981  has_height_leftover = (s->height % s->tile_length != 0);
982 
983  /* Calculate tile counts (round up) */
984  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
985  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
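 /* Example with illustrative sizes: a 4032x3024 image split into 256x256 tiles
  * gives tile_count_x = 16 and tile_count_y = 12; the right-most column of tiles
  * is only 4032 % 256 = 192 pixels wide and the bottom row only
  * 3024 % 256 = 208 pixels tall, which is what the leftover handling below
  * accounts for. */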
986 
987  /* Iterate over the number of tiles */
988  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
989  tile_x = tile_idx % tile_count_x;
990  tile_y = tile_idx / tile_count_x;
991 
992  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
993  tile_width = s->width % s->tile_width;
994  else
995  tile_width = s->tile_width;
996 
997  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
998  tile_length = s->height % s->tile_length;
999  else
1000  tile_length = s->tile_length;
1001 
1002  /* Read tile offset */
1003  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1004  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1005  tile_offset = ff_tget_long(&s->gb, s->le);
1006 
1007  /* Read tile byte size */
1008  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1009  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1010  tile_byte_count = ff_tget_long(&s->gb, s->le);
1011 
1012  /* Seek to tile data */
1013  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1014 
1015  /* Decode JPEG tile and copy it in the reference frame */
1016  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1017 
1018  if (ret < 0)
1019  return ret;
1020 
1021  /* Advance current positions */
1022  pos_x += tile_width;
1023  if (tile_x == tile_count_x - 1) { // If on the right edge
1024  pos_x = 0;
1025  pos_y += tile_length;
1026  }
1027  }
1028 
1029  /* Frame is ready to be output */
1030  frame->pict_type = AV_PICTURE_TYPE_I;
1031  frame->key_frame = 1;
1032 
1033  return avpkt->size;
1034 }
1035 
1036 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1037 {
1038  TiffContext *s = avctx->priv_data;
1039 
1040  s->jpgframe->width = s->width;
1041  s->jpgframe->height = s->height;
1042 
1043  s->avctx_mjpeg->width = s->width;
1044  s->avctx_mjpeg->height = s->height;
1045 
1046  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1047 }
1048 
1049 static int init_image(TiffContext *s, ThreadFrame *frame)
1050 {
1051  int ret;
1052  int create_gray_palette = 0;
1053 
1054  // make sure there is no aliasing in the following switch
1055  if (s->bpp >= 100 || s->bppcount >= 10) {
1057  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1058  s->bpp, s->bppcount);
1059  return AVERROR_INVALIDDATA;
1060  }
1061 
1062  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1063  case 11:
1064  if (!s->palette_is_set) {
1066  break;
1067  }
1068  case 21:
1069  case 41:
1071  if (!s->palette_is_set) {
1072  create_gray_palette = 1;
1073  }
1074  break;
1075  case 81:
1077  break;
1078  case 121:
1080  break;
1081  case 10081:
1082  switch (AV_RL32(s->pattern)) {
1083  case 0x02010100:
1085  break;
1086  case 0x00010102:
1088  break;
1089  case 0x01000201:
1091  break;
1092  case 0x01020001:
1094  break;
1095  default:
1096  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1097  AV_RL32(s->pattern));
1098  return AVERROR_PATCHWELCOME;
1099  }
1100  break;
1101  case 10101:
1102  case 10121:
1103  case 10141:
1104  case 10161:
1105  switch (AV_RL32(s->pattern)) {
1106  case 0x02010100:
1108  break;
1109  case 0x00010102:
1111  break;
1112  case 0x01000201:
1114  break;
1115  case 0x01020001:
1117  break;
1118  default:
1119  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1120  AV_RL32(s->pattern));
1121  return AVERROR_PATCHWELCOME;
1122  }
1123  break;
1124  case 243:
1125  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1126  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1128  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1130  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1132  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1134  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1136  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1138  } else {
1139  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1140  return AVERROR_PATCHWELCOME;
1141  }
1142  } else
1144  break;
1145  case 161:
1147  break;
1148  case 162:
1150  break;
1151  case 322:
1153  break;
1154  case 324:
1156  break;
1157  case 405:
1160  else {
1162  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1163  return AVERROR_PATCHWELCOME;
1164  }
1165  break;
1166  case 483:
1168  break;
1169  case 644:
1171  break;
1172  case 1243:
1174  break;
1175  case 1324:
1177  break;
1178  case 1483:
1180  break;
1181  case 1644:
1183  break;
1184  default:
1186  "This format is not supported (bpp=%d, bppcount=%d)\n",
1187  s->bpp, s->bppcount);
1188  return AVERROR_INVALIDDATA;
1189  }
1190 
1191  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1192  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1193  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1194  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1195  desc->nb_components < 3) {
1196  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1197  return AVERROR_INVALIDDATA;
1198  }
1199  }
1200 
1201  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1202  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1203  if (ret < 0)
1204  return ret;
1205  }
1206  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1207  return ret;
1208  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1209  if (!create_gray_palette)
1210  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1211  else {
1212  /* make default grayscale pal */
1213  int i;
1214  uint32_t *pal = (uint32_t *)frame->f->data[1];
1215  for (i = 0; i < 1<<s->bpp; i++)
1216  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1217  }
1218  }
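 /* The generated grayscale palette spreads the 1 << bpp entries evenly over
  * 0..255: for bpp == 4 the 16 entries are 0xFF000000, 0xFF111111, ...,
  * 0xFFFFFFFF, i.e. gray steps of 17 per entry. */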
1219  return 0;
1220 }
1221 
1222 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1223 {
1224  int offset = tag == TIFF_YRES ? 2 : 0;
1225  s->res[offset++] = num;
1226  s->res[offset] = den;
1227  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1228  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1229  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1230  if (num > INT64_MAX || den > INT64_MAX) {
1231  num = num >> 1;
1232  den = den >> 1;
1233  }
1234  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1235  num, den, INT32_MAX);
1236  if (!s->avctx->sample_aspect_ratio.den)
1237  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1238  }
1239 }
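/* Example of the reduction above, with illustrative values: XResolution = 300/1
 * and YResolution = 150/1 give num = 150 * 1 and den = 300 * 1, so the sample
 * aspect ratio reduces to 1:2 (each pixel spans twice as much vertical distance
 * as horizontal). */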
1240 
1241 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1242 {
1243  AVFrameSideData *sd;
1244  GetByteContext gb_temp;
1245  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1246  int i, start;
1247  int pos;
1248  int ret;
1249  double *dp;
1250 
1251  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1252  if (ret < 0) {
1253  goto end;
1254  }
1255 
1256  off = bytestream2_tell(&s->gb);
1257  if (count == 1) {
1258  switch (type) {
1259  case TIFF_BYTE:
1260  case TIFF_SHORT:
1261  case TIFF_LONG:
1262  value = ff_tget(&s->gb, type, s->le);
1263  break;
1264  case TIFF_RATIONAL:
1265  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1266  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1267  if (!value2) {
1268  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1269  return AVERROR_INVALIDDATA;
1270  }
1271 
1272  break;
1273  case TIFF_STRING:
1274  if (count <= 4) {
1275  break;
1276  }
1277  default:
1278  value = UINT_MAX;
1279  }
1280  }
1281 
1282  switch (tag) {
1283  case TIFF_SUBFILE:
1284  s->is_thumbnail = (value != 0);
1285  break;
1286  case TIFF_WIDTH:
1287  s->width = value;
1288  break;
1289  case TIFF_HEIGHT:
1290  s->height = value;
1291  break;
1292  case TIFF_BPP:
1293  if (count > 5U) {
1295  "This format is not supported (bpp=%d, %d components)\n",
1296  value, count);
1297  return AVERROR_INVALIDDATA;
1298  }
1299  s->bppcount = count;
1300  if (count == 1)
1301  s->bpp = value;
1302  else {
1303  switch (type) {
1304  case TIFF_BYTE:
1305  case TIFF_SHORT:
1306  case TIFF_LONG:
1307  s->bpp = 0;
1308  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1309  return AVERROR_INVALIDDATA;
1310  for (i = 0; i < count; i++)
1311  s->bpp += ff_tget(&s->gb, type, s->le);
1312  break;
1313  default:
1314  s->bpp = -1;
1315  }
1316  }
1317  break;
1319  if (count != 1) {
1321  "Samples per pixel requires a single value, many provided\n");
1322  return AVERROR_INVALIDDATA;
1323  }
1324  if (value > 5U) {
1326  "Samples per pixel %d is too large\n", value);
1327  return AVERROR_INVALIDDATA;
1328  }
1329  if (s->bppcount == 1)
1330  s->bpp *= value;
1331  s->bppcount = value;
1332  break;
1333  case TIFF_COMPR:
1334  s->compr = value;
1335  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1336  s->predictor = 0;
1337  switch (s->compr) {
1338  case TIFF_RAW:
1339  case TIFF_PACKBITS:
1340  case TIFF_LZW:
1341  case TIFF_CCITT_RLE:
1342  break;
1343  case TIFF_G3:
1344  case TIFF_G4:
1345  s->fax_opts = 0;
1346  break;
1347  case TIFF_DEFLATE:
1348  case TIFF_ADOBE_DEFLATE:
1349 #if CONFIG_ZLIB
1350  break;
1351 #else
1352  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1353  return AVERROR(ENOSYS);
1354 #endif
1355  case TIFF_JPEG:
1356  case TIFF_NEWJPEG:
1357  s->is_jpeg = 1;
1358  break;
1359  case TIFF_LZMA:
1360 #if CONFIG_LZMA
1361  break;
1362 #else
1363  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1364  return AVERROR(ENOSYS);
1365 #endif
1366  default:
1367  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1368  s->compr);
1369  return AVERROR_INVALIDDATA;
1370  }
1371  break;
1372  case TIFF_ROWSPERSTRIP:
1373  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1374  value = s->height;
1375  s->rps = FFMIN(value, s->height);
1376  break;
1377  case TIFF_STRIP_OFFS:
1378  if (count == 1) {
1379  if (value > INT_MAX) {
1381  "strippos %u too large\n", value);
1382  return AVERROR_INVALIDDATA;
1383  }
1384  s->strippos = 0;
1385  s->stripoff = value;
1386  } else
1387  s->strippos = off;
1388  s->strips = count;
1389  if (s->strips == 1)
1390  s->rps = s->height;
1391  s->sot = type;
1392  break;
1393  case TIFF_STRIP_SIZE:
1394  if (count == 1) {
1395  if (value > INT_MAX) {
1397  "stripsize %u too large\n", value);
1398  return AVERROR_INVALIDDATA;
1399  }
1400  s->stripsizesoff = 0;
1401  s->stripsize = value;
1402  s->strips = 1;
1403  } else {
1404  s->stripsizesoff = off;
1405  }
1406  s->strips = count;
1407  s->sstype = type;
1408  break;
1409  case TIFF_XRES:
1410  case TIFF_YRES:
1411  set_sar(s, tag, value, value2);
1412  break;
1413  case TIFF_TILE_OFFSETS:
1414  s->tile_offsets_offset = off;
1415  s->tile_count = count;
1416  s->is_tiled = 1;
1417  break;
1418  case TIFF_TILE_BYTE_COUNTS:
1419  s->tile_byte_counts_offset = off;
1420  break;
1421  case TIFF_TILE_LENGTH:
1422  s->tile_length = value;
1423  break;
1424  case TIFF_TILE_WIDTH:
1425  s->tile_width = value;
1426  break;
1427  case TIFF_PREDICTOR:
1428  s->predictor = value;
1429  break;
1430  case TIFF_SUB_IFDS:
1431  if (count == 1)
1432  s->sub_ifd = value;
1433  else if (count > 1)
1434  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1435  break;
1437  for (int i = 0; i < FFMIN(count, 1 << s->bpp); i++)
1438  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1439  break;
1440  case DNG_BLACK_LEVEL:
1441  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1442  if (type == TIFF_RATIONAL) {
1443  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1444  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1445  if (!value2) {
1446  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1447  return AVERROR_INVALIDDATA;
1448  }
1449 
1450  s->black_level = value / value2;
1451  } else
1452  s->black_level = ff_tget(&s->gb, type, s->le);
1453  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1454  } else {
1455  s->black_level = value / value2;
1456  }
1457  break;
1458  case DNG_WHITE_LEVEL:
1459  s->white_level = value;
1460  break;
1461  case TIFF_CFA_PATTERN_DIM:
1462  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1463  ff_tget(&s->gb, type, s->le) != 2)) {
1464  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1465  return AVERROR_INVALIDDATA;
1466  }
1467  break;
1468  case TIFF_CFA_PATTERN:
1469  s->is_bayer = 1;
1470  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1471  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1472  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1473  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1474  break;
1475  case TIFF_PHOTOMETRIC:
1476  switch (value) {
1479  case TIFF_PHOTOMETRIC_RGB:
1483  case TIFF_PHOTOMETRIC_CFA:
1484  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1485  s->photometric = value;
1486  break;
1494  "PhotometricInterpretation 0x%04X",
1495  value);
1496  return AVERROR_PATCHWELCOME;
1497  default:
1498  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1499  "unknown\n", value);
1500  return AVERROR_INVALIDDATA;
1501  }
1502  break;
1503  case TIFF_FILL_ORDER:
1504  if (value < 1 || value > 2) {
1506  "Unknown FillOrder value %d, trying default one\n", value);
1507  value = 1;
1508  }
1509  s->fill_order = value - 1;
1510  break;
1511  case TIFF_PAL: {
1512  GetByteContext pal_gb[3];
1513  off = type_sizes[type];
1514  if (count / 3 > 256 ||
1515  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1516  return AVERROR_INVALIDDATA;
1517 
1518  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1519  bytestream2_skip(&pal_gb[1], count / 3 * off);
1520  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1521 
1522  off = (type_sizes[type] - 1) << 3;
1523  if (off > 31U) {
1524  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1525  return AVERROR_INVALIDDATA;
1526  }
1527 
1528  for (i = 0; i < count / 3; i++) {
1529  uint32_t p = 0xFF000000;
1530  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1531  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1532  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1533  s->palette[i] = p;
1534  }
1535  s->palette_is_set = 1;
1536  break;
1537  }
1538  case TIFF_PLANAR:
1539  s->planar = value == 2;
1540  break;
1542  if (count != 2) {
1543  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1544  return AVERROR_INVALIDDATA;
1545  }
1546  for (i = 0; i < count; i++) {
1547  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1548  if (s->subsampling[i] <= 0) {
1549  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1550  s->subsampling[i] = 1;
1551  return AVERROR_INVALIDDATA;
1552  }
1553  }
1554  break;
1555  case TIFF_T4OPTIONS:
1556  if (s->compr == TIFF_G3)
1557  s->fax_opts = value;
1558  break;
1559  case TIFF_T6OPTIONS:
1560  if (s->compr == TIFF_G4)
1561  s->fax_opts = value;
1562  break;
1563 #define ADD_METADATA(count, name, sep)\
1564  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1565  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1566  goto end;\
1567  }
1569  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1570  break;
1572  ADD_METADATA(count, "ModelTransformationTag", NULL);
1573  break;
1574  case TIFF_MODEL_TIEPOINT:
1575  ADD_METADATA(count, "ModelTiepointTag", NULL);
1576  break;
1578  if (s->geotag_count) {
1579  avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
1580  return AVERROR_INVALIDDATA;
1581  }
1582  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1583  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1584  s->geotag_count = ff_tget_short(&s->gb, s->le);
1585  if (s->geotag_count > count / 4 - 1) {
1586  s->geotag_count = count / 4 - 1;
1587  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1588  }
1589  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1590  || s->geotag_count == 0) {
1591  s->geotag_count = 0;
1592  return -1;
1593  }
1594  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1595  if (!s->geotags) {
1596  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1597  s->geotag_count = 0;
1598  goto end;
1599  }
1600  for (i = 0; i < s->geotag_count; i++) {
1601  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1602  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1603  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1604 
1605  if (!s->geotags[i].type)
1606  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1607  else
1608  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1609  }
1610  break;
1612  if (count >= INT_MAX / sizeof(int64_t))
1613  return AVERROR_INVALIDDATA;
1614  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1615  return AVERROR_INVALIDDATA;
1616  dp = av_malloc_array(count, sizeof(double));
1617  if (!dp) {
1618  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1619  goto end;
1620  }
1621  for (i = 0; i < count; i++)
1622  dp[i] = ff_tget_double(&s->gb, s->le);
1623  for (i = 0; i < s->geotag_count; i++) {
1624  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1625  if (s->geotags[i].count == 0
1626  || s->geotags[i].offset + s->geotags[i].count > count) {
1627  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1628  } else if (s->geotags[i].val) {
1629  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1630  } else {
1631  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1632  if (!ap) {
1633  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1634  av_freep(&dp);
1635  return AVERROR(ENOMEM);
1636  }
1637  s->geotags[i].val = ap;
1638  }
1639  }
1640  }
1641  av_freep(&dp);
1642  break;
1643  case TIFF_GEO_ASCII_PARAMS:
1644  pos = bytestream2_tell(&s->gb);
1645  for (i = 0; i < s->geotag_count; i++) {
1646  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1647  if (s->geotags[i].count == 0
1648  || s->geotags[i].offset + s->geotags[i].count > count) {
1649  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1650  } else {
1651  char *ap;
1652 
1653  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1654  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1655  return AVERROR_INVALIDDATA;
1656  if (s->geotags[i].val)
1657  return AVERROR_INVALIDDATA;
1658  ap = av_malloc(s->geotags[i].count);
1659  if (!ap) {
1660  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1661  return AVERROR(ENOMEM);
1662  }
1663  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1664  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1665  s->geotags[i].val = ap;
1666  }
1667  }
1668  }
1669  break;
1670  case TIFF_ICC_PROFILE:
1671  if (type != TIFF_UNDEFINED)
1672  return AVERROR_INVALIDDATA;
1673 
1674  gb_temp = s->gb;
1675  bytestream2_seek(&gb_temp, off, SEEK_SET);
1676 
1677  if (bytestream2_get_bytes_left(&gb_temp) < count)
1678  return AVERROR_INVALIDDATA;
1679 
1681  if (!sd)
1682  return AVERROR(ENOMEM);
1683 
1684  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1685  break;
1686  case TIFF_ARTIST:
1687  ADD_METADATA(count, "artist", NULL);
1688  break;
1689  case TIFF_COPYRIGHT:
1690  ADD_METADATA(count, "copyright", NULL);
1691  break;
1692  case TIFF_DATE:
1693  ADD_METADATA(count, "date", NULL);
1694  break;
1695  case TIFF_DOCUMENT_NAME:
1696  ADD_METADATA(count, "document_name", NULL);
1697  break;
1698  case TIFF_HOST_COMPUTER:
1699  ADD_METADATA(count, "computer", NULL);
1700  break;
1702  ADD_METADATA(count, "description", NULL);
1703  break;
1704  case TIFF_MAKE:
1705  ADD_METADATA(count, "make", NULL);
1706  break;
1707  case TIFF_MODEL:
1708  ADD_METADATA(count, "model", NULL);
1709  break;
1710  case TIFF_PAGE_NAME:
1711  ADD_METADATA(count, "page_name", NULL);
1712  break;
1713  case TIFF_PAGE_NUMBER:
1714  ADD_METADATA(count, "page_number", " / ");
1715  // need to seek back to re-read the page number
1716  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1717  // read the page number
1718  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1719  // get back to where we were before the previous seek
1720  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1721  break;
1722  case TIFF_SOFTWARE_NAME:
1723  ADD_METADATA(count, "software", NULL);
1724  break;
1725  case DNG_VERSION:
1726  if (count == 4) {
1727  unsigned int ver[4];
1728  ver[0] = ff_tget(&s->gb, type, s->le);
1729  ver[1] = ff_tget(&s->gb, type, s->le);
1730  ver[2] = ff_tget(&s->gb, type, s->le);
1731  ver[3] = ff_tget(&s->gb, type, s->le);
1732 
1733  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1734  ver[0], ver[1], ver[2], ver[3]);
1735 
1737  }
1738  break;
1739  case CINEMADNG_TIME_CODES:
1740  case CINEMADNG_FRAME_RATE:
1741  case CINEMADNG_T_STOP:
1742  case CINEMADNG_REEL_NAME:
1745  break;
1746  default:
1747  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1749  "Unknown or unsupported tag %d/0x%0X\n",
1750  tag, tag);
1751  return AVERROR_INVALIDDATA;
1752  }
1753  }
1754 end:
1755  if (s->bpp > 64U) {
1757  "This format is not supported (bpp=%d, %d components)\n",
1758  s->bpp, count);
1759  s->bpp = 0;
1760  return AVERROR_INVALIDDATA;
1761  }
1762  bytestream2_seek(&s->gb, start, SEEK_SET);
1763  return 0;
1764 }
1765 
1766 static int decode_frame(AVCodecContext *avctx,
1767  void *data, int *got_frame, AVPacket *avpkt)
1768 {
1769  TiffContext *const s = avctx->priv_data;
1770  AVFrame *const p = data;
1771  ThreadFrame frame = { .f = data };
1772  unsigned off, last_off;
1773  int le, ret, plane, planes;
1774  int i, j, entries, stride;
1775  unsigned soff, ssize;
1776  uint8_t *dst;
1777  GetByteContext stripsizes;
1778  GetByteContext stripdata;
1779  int retry_for_subifd, retry_for_page;
1780  int is_dng;
1781  int has_tile_bits, has_strip_bits;
1782 
1783  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1784 
1785  // parse image header
1786  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1787  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1788  return ret;
1789  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1790  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1791  return AVERROR_INVALIDDATA;
1792  }
1793  s->le = le;
1794  // TIFF_BPP is not a required tag and defaults to 1
1795 
1797 again:
1798  s->is_thumbnail = 0;
1799  s->bppcount = s->bpp = 1;
1801  s->compr = TIFF_RAW;
1802  s->fill_order = 0;
1803  s->white_level = 0;
1804  s->is_bayer = 0;
1805  s->is_tiled = 0;
1806  s->is_jpeg = 0;
1807  s->cur_page = 0;
1808 
1809  for (i = 0; i < 65536; i++)
1810  s->dng_lut[i] = i;
1811 
1812  free_geotags(s);
1813 
1814  // Reset these offsets so we can tell if they were set this frame
1815  s->stripsizesoff = s->strippos = 0;
1816  /* parse image file directory */
1817  bytestream2_seek(&s->gb, off, SEEK_SET);
1818  entries = ff_tget_short(&s->gb, le);
1819  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1820  return AVERROR_INVALIDDATA;
1821  for (i = 0; i < entries; i++) {
1822  if ((ret = tiff_decode_tag(s, p)) < 0)
1823  return ret;
1824  }
1825 
1826  if (s->get_thumbnail && !s->is_thumbnail) {
1827  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1828  return AVERROR_EOF;
1829  }
1830 
1831  /** whether we should process this IFD's SubIFD */
1832  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1833  /** whether we should process this multi-page IFD's next page */
1834  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1835 
1836  last_off = off;
1837  if (retry_for_page) {
1838  // set offset to the next IFD
1839  off = ff_tget_long(&s->gb, le);
1840  } else if (retry_for_subifd) {
1841  // set offset to the SubIFD
1842  off = s->sub_ifd;
1843  }
1844 
1845  if (retry_for_subifd || retry_for_page) {
1846  if (!off) {
1847  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1848  return AVERROR_INVALIDDATA;
1849  }
1850  if (off <= last_off) {
1851  avpriv_request_sample(s->avctx, "non increasing IFD offset\n");
1852  return AVERROR_INVALIDDATA;
1853  }
1854  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1855  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1856  return AVERROR_INVALIDDATA;
1857  }
1858  s->sub_ifd = 0;
1859  goto again;
1860  }
1861 
1862  /* At this point we've decided on which (Sub)IFD to process */
1863 
1864  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1865 
1866  for (i = 0; i<s->geotag_count; i++) {
1867  const char *keyname = get_geokey_name(s->geotags[i].key);
1868  if (!keyname) {
1869  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1870  continue;
1871  }
1872  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1873  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1874  continue;
1875  }
1876  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1877  if (ret<0) {
1878  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1879  return ret;
1880  }
1881  }
1882 
1883  if (is_dng) {
1884  int bps;
1885 
1886  if (s->white_level == 0)
1887  s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
1888 
1889  if (s->white_level <= s->black_level) {
1890  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1891  s->black_level, s->white_level);
1892  return AVERROR_INVALIDDATA;
1893  }
1894 
1895  if (s->bpp % s->bppcount)
1896  return AVERROR_INVALIDDATA;
1897  bps = s->bpp / s->bppcount;
1898  if (bps < 8 || bps > 32)
1899  return AVERROR_INVALIDDATA;
1900  if (s->planar)
1901  return AVERROR_PATCHWELCOME;
1902  }
1903 
1904  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1905  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1906  return AVERROR_INVALIDDATA;
1907  }
1908 
1909  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
1910  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
1911 
1912  if (has_tile_bits && has_strip_bits) {
1913  av_log(avctx, AV_LOG_ERROR, "Tiled TIFF is not allowed to use strips\n");
1914  return AVERROR_INVALIDDATA;
1915  }
1916 
1917  /* now we have the data and may start decoding */
1918  if ((ret = init_image(s, &frame)) < 0)
1919  return ret;
1920 
1921  if (!s->is_tiled) {
1922  if (s->strips == 1 && !s->stripsize) {
1923  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1924  s->stripsize = avpkt->size - s->stripoff;
1925  }
1926 
1927  if (s->stripsizesoff) {
1928  if (s->stripsizesoff >= (unsigned)avpkt->size)
1929  return AVERROR_INVALIDDATA;
1930  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1931  avpkt->size - s->stripsizesoff);
1932  }
1933  if (s->strippos) {
1934  if (s->strippos >= (unsigned)avpkt->size)
1935  return AVERROR_INVALIDDATA;
1936  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1937  avpkt->size - s->strippos);
1938  }
1939 
1940  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1941  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1942  return AVERROR_INVALIDDATA;
1943  }
1944  }
1945 
1949  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1951  }
1952 
1953  /* Handle DNG images with JPEG-compressed tiles */
1954 
1955  if (is_dng && s->is_tiled) {
1956  if (!s->is_jpeg) {
1957  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1958  return AVERROR_PATCHWELCOME;
1959  } else if (!s->is_bayer) {
1960  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1961  return AVERROR_PATCHWELCOME;
1962  } else {
1963  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1964  *got_frame = 1;
1965  return ret;
1966  }
1967  }
1968 
1969  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1970 
1971  planes = s->planar ? s->bppcount : 1;
1972  for (plane = 0; plane < planes; plane++) {
1973  uint8_t *five_planes = NULL;
1974  int remaining = avpkt->size;
1975  int decoded_height;
1976  stride = p->linesize[plane];
1977  dst = p->data[plane];
1978  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1979  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1980  stride = stride * 5 / 4;
1981  five_planes =
1982  dst = av_malloc(stride * s->height);
1983  if (!dst)
1984  return AVERROR(ENOMEM);
1985  }
1986  for (i = 0; i < s->height; i += s->rps) {
1987  if (i)
1988  dst += s->rps * stride;
1989  if (s->stripsizesoff)
1990  ssize = ff_tget(&stripsizes, s->sstype, le);
1991  else
1992  ssize = s->stripsize;
1993 
1994  if (s->strippos)
1995  soff = ff_tget(&stripdata, s->sot, le);
1996  else
1997  soff = s->stripoff;
1998 
1999  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2000  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2001  av_freep(&five_planes);
2002  return AVERROR_INVALIDDATA;
2003  }
2004  remaining -= ssize;
2005  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2006  FFMIN(s->rps, s->height - i))) < 0) {
2007  if (avctx->err_recognition & AV_EF_EXPLODE) {
2008  av_freep(&five_planes);
2009  return ret;
2010  }
2011  break;
2012  }
2013  }
2014  decoded_height = FFMIN(i, s->height);
2015 
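 /* Predictor 2 is TIFF horizontal differencing: every sample was stored as
  * the difference from the sample to its left, so decoding is a running sum
  * along each row (e.g. stored 100, +3, -1 becomes 100, 103, 102). It is
  * applied per byte for 8-bit data and per 16-bit word for 16-bit formats. */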
2016  if (s->predictor == 2) {
2017  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2018  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported\n");
2019  return AVERROR_PATCHWELCOME;
2020  }
2021  dst = five_planes ? five_planes : p->data[plane];
2022  soff = s->bpp >> 3;
2023  if (s->planar)
2024  soff = FFMAX(soff / s->bppcount, 1);
2025  ssize = s->width * soff;
2026  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2027  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2028  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2029  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2030  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2031  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2032  for (i = 0; i < decoded_height; i++) {
2033  for (j = soff; j < ssize; j += 2)
2034  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2035  dst += stride;
2036  }
2037  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2038  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2039  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2040  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2041  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2042  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2043  for (i = 0; i < decoded_height; i++) {
2044  for (j = soff; j < ssize; j += 2)
2045  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2046  dst += stride;
2047  }
2048  } else {
2049  for (i = 0; i < decoded_height; i++) {
2050  for (j = soff; j < ssize; j++)
2051  dst[j] += dst[j - soff];
2052  dst += stride;
2053  }
2054  }
2055  }
2056 
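 /* WhiteIsZero stores inverted intensities: flip every sample around the
  * maximum code value (255, or (1 << bpp) - 1 for palettized data). */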
2057  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2058  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2059  dst = p->data[plane];
2060  for (i = 0; i < s->height; i++) {
2061  for (j = 0; j < stride; j++)
2062  dst[j] = c - dst[j];
2063  dst += stride;
2064  }
2065  }
2066 
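 /* Convert inverted CMYK(A) samples to RGB(A) in place:
  * R = (255 - C) * (255 - K) / 255, where the division by 255 is
  * approximated by the "* 257 >> 16" step (257 / 65536 ~= 1 / 255). */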
2067  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2068  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2069  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2070  uint8_t *src = five_planes ? five_planes : p->data[plane];
2071  dst = p->data[plane];
2072  for (i = 0; i < s->height; i++) {
2073  for (j = 0; j < s->width; j++) {
2074  int k = 255 - src[x * j + 3];
2075  int r = (255 - src[x * j ]) * k;
2076  int g = (255 - src[x * j + 1]) * k;
2077  int b = (255 - src[x * j + 2]) * k;
2078  dst[4 * j ] = r * 257 >> 16;
2079  dst[4 * j + 1] = g * 257 >> 16;
2080  dst[4 * j + 2] = b * 257 >> 16;
2081  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2082  }
2083  src += stride;
2084  dst += p->linesize[plane];
2085  }
2086  av_freep(&five_planes);
2087  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2088  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2089  dst = p->data[plane];
2090  for (i = 0; i < s->height; i++) {
2091  for (j = 0; j < s->width; j++) {
2092  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2093  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2094  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2095  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2096  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2097  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2098  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2099  AV_WB16(dst + 8 * j + 6, 65535);
2100  }
2101  dst += p->linesize[plane];
2102  }
2103  }
2104  }
2105 
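 /* Planar RGB(A) strips were decoded in TIFF component order (R, G, B);
  * rotate the plane pointers into the G, B, R order that the GBR(A)P
  * pixel formats expect. */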
2106  if (s->planar && s->bppcount > 2) {
2107  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2108  FFSWAP(int, p->linesize[0], p->linesize[2]);
2109  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2110  FFSWAP(int, p->linesize[0], p->linesize[1]);
2111  }
2112 
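 /* Plain (non-DNG) 16-bit Bayer data: rescale so that white_level maps to
  * the full 16-bit range. */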
2113  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2114  uint16_t *dst = (uint16_t *)p->data[0];
2115  for (i = 0; i < s->height; i++) {
2116  for (j = 0; j < s->width; j++)
2117  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2118  dst += stride / 2;
2119  }
2120  }
2121 
2122  *got_frame = 1;
2123 
2124  return avpkt->size;
2125 }
2126 
2127 static av_cold int tiff_init(AVCodecContext *avctx)
2128 {
2129  TiffContext *s = avctx->priv_data;
2130  const AVCodec *codec;
2131  int ret;
2132 
2133  s->width = 0;
2134  s->height = 0;
2135  s->subsampling[0] =
2136  s->subsampling[1] = 1;
2137  s->avctx = avctx;
2138  ff_lzw_decode_open(&s->lzw);
2139  if (!s->lzw)
2140  return AVERROR(ENOMEM);
2141  ff_ccitt_unpack_init();
2142 
2143  /* Allocate JPEG frame */
2144  s->jpgframe = av_frame_alloc();
2145  if (!s->jpgframe)
2146  return AVERROR(ENOMEM);
2147 
2148  /* Prepare everything needed for JPEG decoding */
2149  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2150  if (!codec)
2151  return AVERROR_BUG;
2152  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2153  if (!s->avctx_mjpeg)
2154  return AVERROR(ENOMEM);
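 /* The wrapper MJPEG decoder inherits the caller's flags and DCT/IDCT
  * selection; it is opened once here and reused by dng_decode_jpeg() for
  * JPEG-compressed DNG tiles and strips. */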
2155  s->avctx_mjpeg->flags = avctx->flags;
2156  s->avctx_mjpeg->flags2 = avctx->flags2;
2157  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2158  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2159  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2160  if (ret < 0) {
2161  return ret;
2162  }
2163 
2164  return 0;
2165 }
2166 
2167 static av_cold int tiff_end(AVCodecContext *avctx)
2168 {
2169  TiffContext *const s = avctx->priv_data;
2170 
2171  free_geotags(s);
2172 
2173  ff_lzw_decode_close(&s->lzw);
2174  av_freep(&s->deinvert_buf);
2175  s->deinvert_buf_size = 0;
2176  av_freep(&s->yuv_line);
2177  s->yuv_line_size = 0;
2178  av_freep(&s->fax_buffer);
2179  s->fax_buffer_size = 0;
2180  av_frame_free(&s->jpgframe);
2181  avcodec_free_context(&s->avctx_mjpeg);
2182  return 0;
2183 }
2184 
2185 #define OFFSET(x) offsetof(TiffContext, x)
2186 static const AVOption tiff_options[] = {
2187  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2188  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2189  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2190  { NULL },
2191 };
2192 
2193 static const AVClass tiff_decoder_class = {
2194  .class_name = "TIFF decoder",
2195  .item_name = av_default_item_name,
2196  .option = tiff_options,
2197  .version = LIBAVUTIL_VERSION_INT,
2198 };
2199 
2200 AVCodec ff_tiff_decoder = {
2201  .name = "tiff",
2202  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2203  .type = AVMEDIA_TYPE_VIDEO,
2204  .id = AV_CODEC_ID_TIFF,
2205  .priv_data_size = sizeof(TiffContext),
2206  .init = tiff_init,
2207  .close = tiff_end,
2208  .decode = decode_frame,
2209  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2210  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2211  .priv_class = &tiff_decoder_class,
2212 };
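
Usage sketch (editor's addition, not part of the upstream file): a minimal example of driving this decoder through the public libavcodec API, assuming the complete TIFF file has already been read into memory. The function name, buffer variables, and the choice of page 2 are illustrative only.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Decode one TIFF image already loaded into buf/buf_size into *out
 * (out allocated by the caller with av_frame_alloc()).
 * Error handling is abbreviated for brevity. */
static int decode_tiff_buffer(const uint8_t *buf, int buf_size, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_TIFF);
    AVCodecContext *ctx;
    AVPacket pkt;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    /* Private options from tiff_options[] above, e.g. request the second
     * page of a multi-page file; AV_OPT_SEARCH_CHILDREN lets the call reach
     * the decoder's private class. */
    av_opt_set_int(ctx, "page", 2, AV_OPT_SEARCH_CHILDREN);

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    av_init_packet(&pkt);
    pkt.data = (uint8_t *)buf;
    pkt.size = buf_size;

    if ((ret = avcodec_send_packet(ctx, &pkt)) >= 0)
        ret = avcodec_receive_frame(ctx, out);

end:
    avcodec_free_context(&ctx);
    return ret;
}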