FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62 
64  uint16_t get_page;
66 
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
72  int le;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
80  uint32_t res[4];
82 
83  int is_bayer;
85  unsigned black_level;
86  unsigned white_level;
87  uint16_t dng_lut[65536];
88 
89  uint32_t sub_ifd;
90  uint16_t cur_page;
91 
92  int strips, rps, sstype;
93  int sot;
96 
97  /* Tile support */
98  int is_tiled;
102 
103  int is_jpeg;
104 
108  unsigned int yuv_line_size;
110  unsigned int fax_buffer_size;
111 
114 } TiffContext;
115 
116 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
117  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
118  s->tiff_type = tiff_type;
119 }
120 
121 static void free_geotags(TiffContext *const s)
122 {
123  int i;
124  for (i = 0; i < s->geotag_count; i++) {
125  if (s->geotags[i].val)
126  av_freep(&s->geotags[i].val);
127  }
128  av_freep(&s->geotags);
129  s->geotag_count = 0;
130 }
131 
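/* RET_GEOKEY maps a GeoTIFF key ID into the matching ff_tiff_*_name_type_map
 * table by subtracting the group's key offset, and returns the requested
 * field (the key's name or its expected value type). */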
132 #define RET_GEOKEY(TYPE, array, element)\
133  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
135  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
136 
137 static const char *get_geokey_name(int key)
138 {
139  RET_GEOKEY(VERT, vert, name);
140  RET_GEOKEY(PROJ, proj, name);
141  RET_GEOKEY(GEOG, geog, name);
142  RET_GEOKEY(CONF, conf, name);
143 
144  return NULL;
145 }
146 
147 static int get_geokey_type(int key)
148 {
149  RET_GEOKEY(VERT, vert, type);
150  RET_GEOKEY(PROJ, proj, type);
151  RET_GEOKEY(GEOG, geog, type);
152  RET_GEOKEY(CONF, conf, type);
153 
154  return AVERROR_INVALIDDATA;
155 }
156 
157 static int cmp_id_key(const void *id, const void *k)
158 {
159  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
160 }
161 
162 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
163 {
164  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
165  if(r)
166  return r->name;
167 
168  return NULL;
169 }
170 
171 static char *get_geokey_val(int key, int val)
172 {
173  char *ap;
174 
175  if (val == TIFF_GEO_KEY_UNDEFINED)
176  return av_strdup("undefined");
177  if (val == TIFF_GEO_KEY_USER_DEFINED)
178  return av_strdup("User-Defined");
179 
180 #define RET_GEOKEY_VAL(TYPE, array)\
181  if (val >= TIFF_##TYPE##_OFFSET &&\
182  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
183  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
184 
185  switch (key) {
187  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
188  break;
190  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
191  break;
195  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
196  break;
199  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
200  break;
202  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
203  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
204  break;
206  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
207  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
208  break;
210  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
211  break;
213  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
214  break;
217  if(ap) return ap;
218  break;
221  if(ap) return ap;
222  break;
224  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
225  break;
227  RET_GEOKEY_VAL(VERT_CS, vert_cs);
228  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
229  break;
230 
231  }
232 
233  ap = av_malloc(14);
234  if (ap)
235  snprintf(ap, 14, "Unknown-%d", val);
236  return ap;
237 }
238 
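/* Join 'count' doubles into one newly allocated, separator-delimited string;
 * for example, doubles2str((double[]){ 1.5, 2.5 }, 2, ", ") would yield
 * "1.5, 2.5". Returns NULL on overflow or allocation failure. */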
239 static char *doubles2str(double *dp, int count, const char *sep)
240 {
241  int i;
242  char *ap, *ap0;
243  uint64_t component_len;
244  if (!sep) sep = ", ";
245  component_len = 24LL + strlen(sep);
246  if (count >= (INT_MAX - 1)/component_len)
247  return NULL;
248  ap = av_malloc(component_len * count + 1);
249  if (!ap)
250  return NULL;
251  ap0 = ap;
252  ap[0] = '\0';
253  for (i = 0; i < count; i++) {
254  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255  if(l >= component_len) {
256  av_free(ap0);
257  return NULL;
258  }
259  ap += l;
260  }
261  ap0[strlen(ap0) - strlen(sep)] = '\0';
262  return ap0;
263 }
264 
265 static int add_metadata(int count, int type,
266  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
267 {
268  switch(type) {
269  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
270  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
271  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
272  default : return AVERROR_INVALIDDATA;
273  };
274 }
275 
276 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
277  const uint8_t *src, int src_stride, int width, int height,
278  int is_single_comp, int is_u16);
279 
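/* Expand packed samples into one output element per sample: 1-, 2- and 4-bit
 * input becomes one byte per sample, 10/12/14-bit input is read with a bit
 * reader into 16-bit output (left-aligned unless the file is a DNG), and
 * 8-bit data is either copied from src (usePtr) or filled with the constant c. */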
280 static void av_always_inline horizontal_fill(TiffContext *s,
281  unsigned int bpp, uint8_t* dst,
282  int usePtr, const uint8_t *src,
283  uint8_t c, int width, int offset)
284 {
285  switch (bpp) {
286  case 1:
287  while (--width >= 0) {
288  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
289  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
290  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
291  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
292  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
293  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
294  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
295  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
296  }
297  break;
298  case 2:
299  while (--width >= 0) {
300  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
301  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
302  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
303  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
304  }
305  break;
306  case 4:
307  while (--width >= 0) {
308  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
309  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
310  }
311  break;
312  case 10:
313  case 12:
314  case 14: {
315  uint16_t *dst16 = (uint16_t *)dst;
316  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
317  uint8_t shift = is_dng ? 0 : 16 - bpp;
318  GetBitContext gb;
319 
320  init_get_bits8(&gb, src, width);
321  for (int i = 0; i < s->width; i++) {
322  dst16[i] = get_bits(&gb, bpp) << shift;
323  }
324  }
325  break;
326  default:
327  if (usePtr) {
328  memcpy(dst + offset, src, width);
329  } else {
330  memset(dst + offset, c, width);
331  }
332  }
333 }
334 
335 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
336 {
337  int i;
338 
339  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
340  if (!s->deinvert_buf)
341  return AVERROR(ENOMEM);
342  for (i = 0; i < size; i++)
343  s->deinvert_buf[i] = ff_reverse[src[i]];
344 
345  return 0;
346 }
347 
348 static void unpack_gray(TiffContext *s, AVFrame *p,
349  const uint8_t *src, int lnum, int width, int bpp)
350 {
352  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
353 
354  init_get_bits8(&gb, src, width);
355 
356  for (int i = 0; i < s->width; i++) {
357  dst[i] = get_bits(&gb, bpp);
358  }
359 }
360 
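/* Expand one interleaved YCbCr strip row into planar output: each horizontal
 * unit carries subsampling[0] * subsampling[1] luma samples followed by one
 * Cb and one Cr sample; at the right/bottom edge, samples are clamped to the
 * image dimensions. */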
361 static void unpack_yuv(TiffContext *s, AVFrame *p,
362  const uint8_t *src, int lnum)
363 {
364  int i, j, k;
365  int w = (s->width - 1) / s->subsampling[0] + 1;
366  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
367  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
368  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
369  for (i = 0; i < w; i++) {
370  for (j = 0; j < s->subsampling[1]; j++)
371  for (k = 0; k < s->subsampling[0]; k++)
372  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
373  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
374  *pu++ = *src++;
375  *pv++ = *src++;
376  }
377  }else{
378  for (i = 0; i < w; i++) {
379  for (j = 0; j < s->subsampling[1]; j++)
380  for (k = 0; k < s->subsampling[0]; k++)
381  p->data[0][(lnum + j) * p->linesize[0] +
382  i * s->subsampling[0] + k] = *src++;
383  *pu++ = *src++;
384  *pv++ = *src++;
385  }
386  }
387 }
388 
389 #if CONFIG_ZLIB
390 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
391  int size)
392 {
393  z_stream zstream = { 0 };
394  int zret;
395 
396  zstream.next_in = (uint8_t *)src;
397  zstream.avail_in = size;
398  zstream.next_out = dst;
399  zstream.avail_out = *len;
400  zret = inflateInit(&zstream);
401  if (zret != Z_OK) {
402  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
403  return zret;
404  }
405  zret = inflate(&zstream, Z_SYNC_FLUSH);
406  inflateEnd(&zstream);
407  *len = zstream.total_out;
408  return zret == Z_STREAM_END ? Z_OK : zret;
409 }
410 
411 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
412  const uint8_t *src, int size, int width, int lines,
413  int strip_start, int is_yuv)
414 {
415  uint8_t *zbuf;
416  unsigned long outlen;
417  int ret, line;
418  outlen = width * lines;
419  zbuf = av_malloc(outlen);
420  if (!zbuf)
421  return AVERROR(ENOMEM);
422  if (s->fill_order) {
423  if ((ret = deinvert_buffer(s, src, size)) < 0) {
424  av_free(zbuf);
425  return ret;
426  }
427  src = s->deinvert_buf;
428  }
429  ret = tiff_uncompress(zbuf, &outlen, src, size);
430  if (ret != Z_OK) {
432  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
433  (unsigned long)width * lines, ret);
434  av_free(zbuf);
435  return AVERROR_UNKNOWN;
436  }
437  src = zbuf;
438  for (line = 0; line < lines; line++) {
439  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
440  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
441  } else {
442  memcpy(dst, src, width);
443  }
444  if (is_yuv) {
445  unpack_yuv(s, p, dst, strip_start + line);
446  line += s->subsampling[1] - 1;
447  }
448  dst += stride;
449  src += width;
450  }
451  av_free(zbuf);
452  return 0;
453 }
454 #endif
455 
456 #if CONFIG_LZMA
457 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
458  int size)
459 {
460  lzma_stream stream = LZMA_STREAM_INIT;
461  lzma_ret ret;
462 
463  stream.next_in = (uint8_t *)src;
464  stream.avail_in = size;
465  stream.next_out = dst;
466  stream.avail_out = *len;
467  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
468  if (ret != LZMA_OK) {
469  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
470  return ret;
471  }
472  ret = lzma_code(&stream, LZMA_RUN);
473  lzma_end(&stream);
474  *len = stream.total_out;
475  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
476 }
477 
478 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
479  const uint8_t *src, int size, int width, int lines,
480  int strip_start, int is_yuv)
481 {
482  uint64_t outlen = width * (uint64_t)lines;
483  int ret, line;
484  uint8_t *buf = av_malloc(outlen);
485  if (!buf)
486  return AVERROR(ENOMEM);
487  if (s->fill_order) {
488  if ((ret = deinvert_buffer(s, src, size)) < 0) {
489  av_free(buf);
490  return ret;
491  }
492  src = s->deinvert_buf;
493  }
494  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
495  if (ret != LZMA_OK) {
497  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
498  (uint64_t)width * lines, ret);
499  av_free(buf);
500  return AVERROR_UNKNOWN;
501  }
502  src = buf;
503  for (line = 0; line < lines; line++) {
504  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
505  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
506  } else {
507  memcpy(dst, src, width);
508  }
509  if (is_yuv) {
510  unpack_yuv(s, p, dst, strip_start + line);
511  line += s->subsampling[1] - 1;
512  }
513  dst += stride;
514  src += width;
515  }
516  av_free(buf);
517  return 0;
518 }
519 #endif
520 
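/* Decode CCITT Group 3/Group 4 and Modified Huffman (fax) strips through
 * ff_ccitt_unpack(), bit-reversing the input first when FillOrder is 2. */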
521 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
522  const uint8_t *src, int size, int width, int lines)
523 {
524  int i, ret = 0;
525  int line;
526  uint8_t *src2;
527 
528  av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
529  src2 = s->fax_buffer;
530 
531  if (!src2) {
533  "Error allocating temporary buffer\n");
534  return AVERROR(ENOMEM);
535  }
536 
537  if (!s->fill_order) {
538  memcpy(src2, src, size);
539  } else {
540  for (i = 0; i < size; i++)
541  src2[i] = ff_reverse[src[i]];
542  }
543  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
544  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
545  s->compr, s->fax_opts);
546  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
547  for (line = 0; line < lines; line++) {
548  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
549  dst += stride;
550  }
551  return ret;
552 }
553 
554 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
555 
556 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
557  const uint8_t *src, int size, int strip_start, int lines)
558 {
559  PutByteContext pb;
560  int c, line, pixels, code, ret;
561  const uint8_t *ssrc = src;
562  int width = ((s->width * s->bpp) + 7) >> 3;
563  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
564  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
565  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
566  desc->nb_components >= 3;
567  int is_dng;
568 
569  if (s->planar)
570  width /= s->bppcount;
571 
572  if (size <= 0)
573  return AVERROR_INVALIDDATA;
574 
575  if (is_yuv) {
576  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
577  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
578  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
579  if (s->yuv_line == NULL) {
580  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
581  return AVERROR(ENOMEM);
582  }
583  dst = s->yuv_line;
584  stride = 0;
585 
586  width = (s->width - 1) / s->subsampling[0] + 1;
587  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
588  av_assert0(width <= bytes_per_row);
589  av_assert0(s->bpp == 24);
590  }
591  if (s->is_bayer) {
592  width = (s->bpp * s->width + 7) >> 3;
593  }
594  if (p->format == AV_PIX_FMT_GRAY12) {
595  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
596  if (s->yuv_line == NULL) {
597  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
598  return AVERROR(ENOMEM);
599  }
600  dst = s->yuv_line;
601  stride = 0;
602  }
603 
604  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
605 #if CONFIG_ZLIB
606  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
607  strip_start, is_yuv);
608 #else
610  "zlib support not enabled, "
611  "deflate compression not supported\n");
612  return AVERROR(ENOSYS);
613 #endif
614  }
615  if (s->compr == TIFF_LZMA) {
616 #if CONFIG_LZMA
617  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
618  strip_start, is_yuv);
619 #else
621  "LZMA support not enabled\n");
622  return AVERROR(ENOSYS);
623 #endif
624  }
625  if (s->compr == TIFF_LZW) {
626  if (s->fill_order) {
627  if ((ret = deinvert_buffer(s, src, size)) < 0)
628  return ret;
629  ssrc = src = s->deinvert_buf;
630  }
631  if (size > 1 && !src[0] && (src[1]&1)) {
632  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
633  }
634  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
635  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
636  return ret;
637  }
638  for (line = 0; line < lines; line++) {
639  pixels = ff_lzw_decode(s->lzw, dst, width);
640  if (pixels < width) {
641  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
642  pixels, width);
643  return AVERROR_INVALIDDATA;
644  }
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
647  if (is_yuv) {
648  unpack_yuv(s, p, dst, strip_start + line);
649  line += s->subsampling[1] - 1;
650  } else if (p->format == AV_PIX_FMT_GRAY12) {
651  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
652  }
653  dst += stride;
654  }
655  return 0;
656  }
657  if (s->compr == TIFF_CCITT_RLE ||
658  s->compr == TIFF_G3 ||
659  s->compr == TIFF_G4) {
660  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
661  return AVERROR_INVALIDDATA;
662 
663  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
664  }
665 
666  bytestream2_init(&s->gb, src, size);
667  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
668 
669  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
670 
671  /* Decode JPEG-encoded DNGs with strips */
672  if (s->compr == TIFF_NEWJPEG && is_dng) {
673  if (s->strips > 1) {
674  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
678  return ret;
679  return 0;
680  }
681 
682  for (line = 0; line < lines; line++) {
683  if (src - ssrc > size) {
684  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
685  return AVERROR_INVALIDDATA;
686  }
687 
688  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
689  break;
690  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
691  switch (s->compr) {
692  case TIFF_RAW:
693  if (ssrc + size - src < width)
694  return AVERROR_INVALIDDATA;
695 
696  if (!s->fill_order) {
697  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
698  dst, 1, src, 0, width, 0);
699  } else {
700  int i;
701  for (i = 0; i < width; i++)
702  dst[i] = ff_reverse[src[i]];
703  }
704 
705  /* Color processing for DNG images with uncompressed strips (non-tiled) */
706  if (is_dng) {
707  int is_u16, pixel_size_bytes, pixel_size_bits;
708 
709  is_u16 = (s->bpp > 8);
710  pixel_size_bits = (is_u16 ? 16 : 8);
711  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
712 
713  dng_blit(s,
714  dst,
715  0, // no stride, only 1 line
716  dst,
717  0, // no stride, only 1 line
718  width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount, // need to account for [1, 16] bpp
719  1,
720  0, // single-component variation is only preset in JPEG-encoded DNGs
721  is_u16);
722  }
723 
724  src += width;
725  break;
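 /* PackBits RLE: each control byte is signed; n >= 0 copies the next n + 1
    literal bytes, -127..-1 repeats the following byte 1 - n times, and
    -128 is a no-op that is simply skipped. */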
726  case TIFF_PACKBITS:
727  for (pixels = 0; pixels < width;) {
728  if (ssrc + size - src < 2) {
729  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
730  return AVERROR_INVALIDDATA;
731  }
732  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
733  if (code >= 0) {
734  code++;
735  if (pixels + code > width ||
736  ssrc + size - src < code) {
738  "Copy went out of bounds\n");
739  return AVERROR_INVALIDDATA;
740  }
741  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
742  dst, 1, src, 0, code, pixels);
743  src += code;
744  pixels += code;
745  } else if (code != -128) { // -127..-1
746  code = (-code) + 1;
747  if (pixels + code > width) {
749  "Run went out of bounds\n");
750  return AVERROR_INVALIDDATA;
751  }
752  c = *src++;
753  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
754  dst, 0, NULL, c, code, pixels);
755  pixels += code;
756  }
757  }
758  if (s->fill_order) {
759  int i;
760  for (i = 0; i < width; i++)
761  dst[i] = ff_reverse[dst[i]];
762  }
763  break;
764  }
765  if (is_yuv) {
766  unpack_yuv(s, p, dst, strip_start + line);
767  line += s->subsampling[1] - 1;
768  } else if (p->format == AV_PIX_FMT_GRAY12) {
769  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
770  }
771  dst += stride;
772  }
773  return 0;
774 }
775 
776 /**
777  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
778  */
779 static uint16_t av_always_inline dng_process_color16(uint16_t value,
780  const uint16_t *lut,
781  uint16_t black_level,
782  float scale_factor) {
783  float value_norm;
784 
785  // Lookup table lookup
786  if (lut)
787  value = lut[value];
788 
789  // Black level subtraction
790  value = av_clip_uint16_c((unsigned)value - black_level);
791 
792  // Color scaling
793  value_norm = (float)value * scale_factor;
794 
795  value = av_clip_uint16_c(value_norm * 65535);
796 
797  return value;
798 }
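/* Worked example (illustrative values only): with no LUT, black_level = 256
 * and white_level = 4095, dng_blit() passes scale_factor = 1.0 / (4095 - 256);
 * an input sample of 2175 then maps to (2175 - 256) * scale_factor ~= 0.5 and
 * is stored as roughly the midpoint of the 16-bit range. */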
799 
800 static uint16_t av_always_inline dng_process_color8(uint16_t value,
801  const uint16_t *lut,
802  uint16_t black_level,
803  float scale_factor) {
804  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
805 }
806 
807 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
808  const uint8_t *src, int src_stride,
809  int width, int height, int is_single_comp, int is_u16)
810 {
811  int line, col;
812  float scale_factor;
813 
814  scale_factor = 1.0f / (s->white_level - s->black_level);
815 
816  if (is_single_comp) {
817  if (!is_u16)
818  return; /* <= 8bpp unsupported */
819 
820  /* Image is double the width and half the height we need, each row comprises 2 rows of the output
821  (split vertically in the middle). */
822  for (line = 0; line < height / 2; line++) {
823  uint16_t *dst_u16 = (uint16_t *)dst;
824  uint16_t *src_u16 = (uint16_t *)src;
825 
826  /* Blit first half of input row to initial row of output */
827  for (col = 0; col < width; col++)
828  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
829 
830  /* Advance the destination pointer by a row (source pointer remains in the same place) */
831  dst += dst_stride * sizeof(uint16_t);
832  dst_u16 = (uint16_t *)dst;
833 
834  /* Blit second half of input row to next row of output */
835  for (col = 0; col < width; col++)
836  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
837 
838  dst += dst_stride * sizeof(uint16_t);
839  src += src_stride * sizeof(uint16_t);
840  }
841  } else {
842  /* Input and output image are the same size and the MJpeg decoder has done per-component
843  deinterleaving, so blitting here is straightforward. */
844  if (is_u16) {
845  for (line = 0; line < height; line++) {
846  uint16_t *dst_u16 = (uint16_t *)dst;
847  uint16_t *src_u16 = (uint16_t *)src;
848 
849  for (col = 0; col < width; col++)
850  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
851 
852  dst += dst_stride * sizeof(uint16_t);
853  src += src_stride * sizeof(uint16_t);
854  }
855  } else {
856  for (line = 0; line < height; line++) {
857  for (col = 0; col < width; col++)
858  *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
859 
860  dst += dst_stride;
861  src += src_stride;
862  }
863  }
864  }
865 }
866 
867 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
868  int tile_byte_count, int dst_x, int dst_y, int w, int h)
869 {
870  TiffContext *s = avctx->priv_data;
871  AVPacket jpkt;
872  uint8_t *dst_data, *src_data;
873  uint32_t dst_offset; /* offset from dst buffer in pixels */
874  int is_single_comp, is_u16, pixel_size;
875  int ret;
876 
877  /* Prepare a packet and send to the MJPEG decoder */
878  av_init_packet(&jpkt);
879  jpkt.data = (uint8_t*)s->gb.buffer;
880  jpkt.size = tile_byte_count;
881 
882  if (s->is_bayer) {
883  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
884  /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
885  image or not from its own data (and we need that information when decoding it). */
886  mjpegdecctx->bayer = 1;
887  }
888 
889  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
890  if (ret < 0) {
891  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
892  return ret;
893  }
894 
895  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
896  if (ret < 0) {
897  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
898 
899  /* Normally skip, error if explode */
900  if (avctx->err_recognition & AV_EF_EXPLODE)
901  return AVERROR_INVALIDDATA;
902  else
903  return 0;
904  }
905 
906  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
907 
908  /* See dng_blit for explanation */
909  is_single_comp = (s->avctx_mjpeg->width == w * 2 && s->avctx_mjpeg->height == h / 2);
910 
911  is_u16 = (s->bpp > 8);
912  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
913 
914  if (is_single_comp && !is_u16) {
915  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
916  av_frame_unref(s->jpgframe);
917  return AVERROR_PATCHWELCOME;
918  }
919 
920  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
921  dst_data = frame->data[0] + dst_offset * pixel_size;
922  src_data = s->jpgframe->data[0];
923 
924  dng_blit(s,
925  dst_data,
926  frame->linesize[0] / pixel_size,
927  src_data,
928  s->jpgframe->linesize[0] / pixel_size,
929  w,
930  h,
931  is_single_comp,
932  is_u16);
933 
934  av_frame_unref(s->jpgframe);
935 
936  return 0;
937 }
938 
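/* Walk the DNG tile grid left-to-right, top-to-bottom: tile_count_x is
 * ceil(width / tile_width), so tile_idx maps to column tile_idx % tile_count_x
 * and row tile_idx / tile_count_x; tiles on the right and bottom edges may be
 * smaller than the nominal tile size. */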
939 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
940 {
941  TiffContext *s = avctx->priv_data;
942  int tile_idx;
943  int tile_offset_offset, tile_offset;
944  int tile_byte_count_offset, tile_byte_count;
945  int tile_count_x, tile_count_y;
946  int tile_width, tile_length;
947  int has_width_leftover, has_height_leftover;
948  int tile_x = 0, tile_y = 0;
949  int pos_x = 0, pos_y = 0;
950  int ret;
951 
952  s->jpgframe->width = s->tile_width;
953  s->jpgframe->height = s->tile_length;
954 
955  s->avctx_mjpeg->width = s->tile_width;
956  s->avctx_mjpeg->height = s->tile_length;
957 
958  has_width_leftover = (s->width % s->tile_width != 0);
959  has_height_leftover = (s->height % s->tile_length != 0);
960 
961  /* Calculate tile counts (round up) */
962  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
963  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
964 
965  /* Iterate over the number of tiles */
966  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
967  tile_x = tile_idx % tile_count_x;
968  tile_y = tile_idx / tile_count_x;
969 
970  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
971  tile_width = s->width % s->tile_width;
972  else
973  tile_width = s->tile_width;
974 
975  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
976  tile_length = s->height % s->tile_length;
977  else
978  tile_length = s->tile_length;
979 
980  /* Read tile offset */
981  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
982  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
983  tile_offset = ff_tget_long(&s->gb, s->le);
984 
985  /* Read tile byte size */
986  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
987  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
988  tile_byte_count = ff_tget_long(&s->gb, s->le);
989 
990  /* Seek to tile data */
991  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
992 
993  /* Decode JPEG tile and copy it in the reference frame */
994  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
995 
996  if (ret < 0)
997  return ret;
998 
999  /* Advance current positions */
1000  pos_x += tile_width;
1001  if (tile_x == tile_count_x - 1) { // If on the right edge
1002  pos_x = 0;
1003  pos_y += tile_length;
1004  }
1005  }
1006 
1007  /* Frame is ready to be output */
1008  frame->pict_type = AV_PICTURE_TYPE_I;
1009  frame->key_frame = 1;
1010 
1011  return avpkt->size;
1012 }
1013 
1014 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1015 {
1016  TiffContext *s = avctx->priv_data;
1017 
1018  s->jpgframe->width = s->width;
1019  s->jpgframe->height = s->height;
1020 
1021  s->avctx_mjpeg->width = s->width;
1022  s->avctx_mjpeg->height = s->height;
1023 
1024  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1025 }
1026 
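/* Pixel format selection below switches on the key
 * planar * 1000 + bpp * 10 + bppcount (+ 10000 for Bayer CFA data), so e.g.
 * 8 bpp with one component gives 81 and 48 bpp RGB with three components
 * gives 483. */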
1027 static int init_image(TiffContext *s, ThreadFrame *frame)
1028 {
1029  int ret;
1030  int create_gray_palette = 0;
1031 
1032  // make sure there is no aliasing in the following switch
1033  if (s->bpp >= 100 || s->bppcount >= 10) {
1035  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1036  s->bpp, s->bppcount);
1037  return AVERROR_INVALIDDATA;
1038  }
1039 
1040  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1041  case 11:
1042  if (!s->palette_is_set) {
1044  break;
1045  }
1046  case 21:
1047  case 41:
1049  if (!s->palette_is_set) {
1050  create_gray_palette = 1;
1051  }
1052  break;
1053  case 81:
1055  break;
1056  case 121:
1058  break;
1059  case 10081:
1060  switch (AV_RL32(s->pattern)) {
1061  case 0x02010100:
1063  break;
1064  case 0x00010102:
1066  break;
1067  case 0x01000201:
1069  break;
1070  case 0x01020001:
1072  break;
1073  default:
1074  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1075  AV_RL32(s->pattern));
1076  return AVERROR_PATCHWELCOME;
1077  }
1078  break;
1079  case 10101:
1080  case 10121:
1081  case 10141:
1082  case 10161:
1083  switch (AV_RL32(s->pattern)) {
1084  case 0x02010100:
1086  break;
1087  case 0x00010102:
1089  break;
1090  case 0x01000201:
1092  break;
1093  case 0x01020001:
1095  break;
1096  default:
1097  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1098  AV_RL32(s->pattern));
1099  return AVERROR_PATCHWELCOME;
1100  }
1101  break;
1102  case 243:
1103  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1104  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1106  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1108  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1110  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1112  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1114  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1116  } else {
1117  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1118  return AVERROR_PATCHWELCOME;
1119  }
1120  } else
1122  break;
1123  case 161:
1125  break;
1126  case 162:
1128  break;
1129  case 322:
1131  break;
1132  case 324:
1134  break;
1135  case 405:
1138  else {
1140  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1141  return AVERROR_PATCHWELCOME;
1142  }
1143  break;
1144  case 483:
1146  break;
1147  case 644:
1149  break;
1150  case 1243:
1152  break;
1153  case 1324:
1155  break;
1156  case 1483:
1158  break;
1159  case 1644:
1161  break;
1162  default:
1164  "This format is not supported (bpp=%d, bppcount=%d)\n",
1165  s->bpp, s->bppcount);
1166  return AVERROR_INVALIDDATA;
1167  }
1168 
1169  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1170  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1171  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1172  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1173  desc->nb_components < 3) {
1174  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1175  return AVERROR_INVALIDDATA;
1176  }
1177  }
1178 
1179  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1180  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1181  if (ret < 0)
1182  return ret;
1183  }
1184  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1185  return ret;
1186  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1187  if (!create_gray_palette)
1188  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1189  else {
1190  /* make default grayscale pal */
1191  int i;
1192  uint32_t *pal = (uint32_t *)frame->f->data[1];
1193  for (i = 0; i < 1<<s->bpp; i++)
1194  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1195  }
1196  }
1197  return 0;
1198 }
1199 
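/* Derive the sample aspect ratio from the XResolution/YResolution rationals:
 * once all four values are present, SAR = (yres_num * xres_den) /
 * (xres_num * yres_den), reduced to fit 32 bits. */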
1200 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1201 {
1202  int offset = tag == TIFF_YRES ? 2 : 0;
1203  s->res[offset++] = num;
1204  s->res[offset] = den;
1205  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1206  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1207  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1208  if (num > INT64_MAX || den > INT64_MAX) {
1209  num = num >> 1;
1210  den = den >> 1;
1211  }
1212  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1213  num, den, INT32_MAX);
1214  if (!s->avctx->sample_aspect_ratio.den)
1215  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1216  }
1217 }
1218 
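/* Parse one 12-byte IFD entry (tag, type, count, value/offset) and apply it
 * to the decoder state; unknown tags are fatal only when AV_EF_EXPLODE error
 * recognition is enabled. */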
1219 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1220 {
1221  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1222  int i, start;
1223  int pos;
1224  int ret;
1225  double *dp;
1226 
1227  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1228  if (ret < 0) {
1229  goto end;
1230  }
1231 
1232  off = bytestream2_tell(&s->gb);
1233  if (count == 1) {
1234  switch (type) {
1235  case TIFF_BYTE:
1236  case TIFF_SHORT:
1237  case TIFF_LONG:
1238  value = ff_tget(&s->gb, type, s->le);
1239  break;
1240  case TIFF_RATIONAL:
1241  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1242  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1243  if (!value2) {
1244  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1245  return AVERROR_INVALIDDATA;
1246  }
1247 
1248  break;
1249  case TIFF_STRING:
1250  if (count <= 4) {
1251  break;
1252  }
1253  default:
1254  value = UINT_MAX;
1255  }
1256  }
1257 
1258  switch (tag) {
1259  case TIFF_SUBFILE:
1260  s->is_thumbnail = (value != 0);
1261  break;
1262  case TIFF_WIDTH:
1263  s->width = value;
1264  break;
1265  case TIFF_HEIGHT:
1266  s->height = value;
1267  break;
1268  case TIFF_BPP:
1269  if (count > 5U) {
1271  "This format is not supported (bpp=%d, %d components)\n",
1272  value, count);
1273  return AVERROR_INVALIDDATA;
1274  }
1275  s->bppcount = count;
1276  if (count == 1)
1277  s->bpp = value;
1278  else {
1279  switch (type) {
1280  case TIFF_BYTE:
1281  case TIFF_SHORT:
1282  case TIFF_LONG:
1283  s->bpp = 0;
1284  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1285  return AVERROR_INVALIDDATA;
1286  for (i = 0; i < count; i++)
1287  s->bpp += ff_tget(&s->gb, type, s->le);
1288  break;
1289  default:
1290  s->bpp = -1;
1291  }
1292  }
1293  break;
1295  if (count != 1) {
1297  "Samples per pixel requires a single value, many provided\n");
1298  return AVERROR_INVALIDDATA;
1299  }
1300  if (value > 5U) {
1302  "Samples per pixel %d is too large\n", value);
1303  return AVERROR_INVALIDDATA;
1304  }
1305  if (s->bppcount == 1)
1306  s->bpp *= value;
1307  s->bppcount = value;
1308  break;
1309  case TIFF_COMPR:
1310  s->compr = value;
1311  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1312  s->predictor = 0;
1313  switch (s->compr) {
1314  case TIFF_RAW:
1315  case TIFF_PACKBITS:
1316  case TIFF_LZW:
1317  case TIFF_CCITT_RLE:
1318  break;
1319  case TIFF_G3:
1320  case TIFF_G4:
1321  s->fax_opts = 0;
1322  break;
1323  case TIFF_DEFLATE:
1324  case TIFF_ADOBE_DEFLATE:
1325 #if CONFIG_ZLIB
1326  break;
1327 #else
1328  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1329  return AVERROR(ENOSYS);
1330 #endif
1331  case TIFF_JPEG:
1332  case TIFF_NEWJPEG:
1333  s->is_jpeg = 1;
1334  break;
1335  case TIFF_LZMA:
1336 #if CONFIG_LZMA
1337  break;
1338 #else
1339  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1340  return AVERROR(ENOSYS);
1341 #endif
1342  default:
1343  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1344  s->compr);
1345  return AVERROR_INVALIDDATA;
1346  }
1347  break;
1348  case TIFF_ROWSPERSTRIP:
1349  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1350  value = s->height;
1351  s->rps = FFMIN(value, s->height);
1352  break;
1353  case TIFF_STRIP_OFFS:
1354  if (count == 1) {
1355  if (value > INT_MAX) {
1357  "strippos %u too large\n", value);
1358  return AVERROR_INVALIDDATA;
1359  }
1360  s->strippos = 0;
1361  s->stripoff = value;
1362  } else
1363  s->strippos = off;
1364  s->strips = count;
1365  if (s->strips == 1)
1366  s->rps = s->height;
1367  s->sot = type;
1368  break;
1369  case TIFF_STRIP_SIZE:
1370  if (count == 1) {
1371  if (value > INT_MAX) {
1373  "stripsize %u too large\n", value);
1374  return AVERROR_INVALIDDATA;
1375  }
1376  s->stripsizesoff = 0;
1377  s->stripsize = value;
1378  s->strips = 1;
1379  } else {
1380  s->stripsizesoff = off;
1381  }
1382  s->strips = count;
1383  s->sstype = type;
1384  break;
1385  case TIFF_XRES:
1386  case TIFF_YRES:
1387  set_sar(s, tag, value, value2);
1388  break;
1389  case TIFF_TILE_OFFSETS:
1390  s->tile_offsets_offset = off;
1391  s->tile_count = count;
1392  s->is_tiled = 1;
1393  break;
1394  case TIFF_TILE_BYTE_COUNTS:
1395  s->tile_byte_counts_offset = off;
1396  break;
1397  case TIFF_TILE_LENGTH:
1398  s->tile_length = value;
1399  break;
1400  case TIFF_TILE_WIDTH:
1401  s->tile_width = value;
1402  break;
1403  case TIFF_PREDICTOR:
1404  s->predictor = value;
1405  break;
1406  case TIFF_SUB_IFDS:
1407  if (count == 1)
1408  s->sub_ifd = value;
1409  else if (count > 1)
1410  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1411  break;
1412  case DNG_LINEARIZATION_TABLE:
1413  for (int i = 0; i < FFMIN(count, 1 << s->bpp); i++)
1414  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1415  break;
1416  case DNG_BLACK_LEVEL:
1417  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1418  if (type == TIFF_RATIONAL) {
1419  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1420  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1421  if (!value2) {
1422  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1423  return AVERROR_INVALIDDATA;
1424  }
1425 
1426  s->black_level = value / value2;
1427  } else
1428  s->black_level = ff_tget(&s->gb, type, s->le);
1429  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1430  } else {
1431  s->black_level = value / value2;
1432  }
1433  break;
1434  case DNG_WHITE_LEVEL:
1435  s->white_level = value;
1436  break;
1437  case TIFF_CFA_PATTERN_DIM:
1438  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1439  ff_tget(&s->gb, type, s->le) != 2)) {
1440  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1441  return AVERROR_INVALIDDATA;
1442  }
1443  break;
1444  case TIFF_CFA_PATTERN:
1445  s->is_bayer = 1;
1446  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1447  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1448  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1449  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1450  break;
1451  case TIFF_PHOTOMETRIC:
1452  switch (value) {
1455  case TIFF_PHOTOMETRIC_RGB:
1459  case TIFF_PHOTOMETRIC_CFA:
1460  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1461  s->photometric = value;
1462  break;
1470  "PhotometricInterpretation 0x%04X",
1471  value);
1472  return AVERROR_PATCHWELCOME;
1473  default:
1474  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1475  "unknown\n", value);
1476  return AVERROR_INVALIDDATA;
1477  }
1478  break;
1479  case TIFF_FILL_ORDER:
1480  if (value < 1 || value > 2) {
1482  "Unknown FillOrder value %d, trying default one\n", value);
1483  value = 1;
1484  }
1485  s->fill_order = value - 1;
1486  break;
1487  case TIFF_PAL: {
1488  GetByteContext pal_gb[3];
1489  off = type_sizes[type];
1490  if (count / 3 > 256 ||
1491  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1492  return AVERROR_INVALIDDATA;
1493 
1494  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1495  bytestream2_skip(&pal_gb[1], count / 3 * off);
1496  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1497 
1498  off = (type_sizes[type] - 1) << 3;
1499  if (off > 31U) {
1500  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1501  return AVERROR_INVALIDDATA;
1502  }
1503 
1504  for (i = 0; i < count / 3; i++) {
1505  uint32_t p = 0xFF000000;
1506  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1507  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1508  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1509  s->palette[i] = p;
1510  }
1511  s->palette_is_set = 1;
1512  break;
1513  }
1514  case TIFF_PLANAR:
1515  s->planar = value == 2;
1516  break;
1518  if (count != 2) {
1519  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1520  return AVERROR_INVALIDDATA;
1521  }
1522  for (i = 0; i < count; i++) {
1523  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1524  if (s->subsampling[i] <= 0) {
1525  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1526  s->subsampling[i] = 1;
1527  return AVERROR_INVALIDDATA;
1528  }
1529  }
1530  break;
1531  case TIFF_T4OPTIONS:
1532  if (s->compr == TIFF_G3)
1533  s->fax_opts = value;
1534  break;
1535  case TIFF_T6OPTIONS:
1536  if (s->compr == TIFF_G4)
1537  s->fax_opts = value;
1538  break;
1539 #define ADD_METADATA(count, name, sep)\
1540  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1541  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1542  goto end;\
1543  }
1545  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1546  break;
1548  ADD_METADATA(count, "ModelTransformationTag", NULL);
1549  break;
1550  case TIFF_MODEL_TIEPOINT:
1551  ADD_METADATA(count, "ModelTiepointTag", NULL);
1552  break;
1553  case TIFF_GEO_KEY_DIRECTORY:
1554  if (s->geotag_count) {
1555  avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
1556  return AVERROR_INVALIDDATA;
1557  }
1558  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1559  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1560  s->geotag_count = ff_tget_short(&s->gb, s->le);
1561  if (s->geotag_count > count / 4 - 1) {
1562  s->geotag_count = count / 4 - 1;
1563  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1564  }
1565  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1566  || s->geotag_count == 0) {
1567  s->geotag_count = 0;
1568  return -1;
1569  }
1570  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1571  if (!s->geotags) {
1572  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1573  s->geotag_count = 0;
1574  goto end;
1575  }
1576  for (i = 0; i < s->geotag_count; i++) {
1577  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1578  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1579  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1580 
1581  if (!s->geotags[i].type)
1582  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1583  else
1584  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1585  }
1586  break;
1587  case TIFF_GEO_DOUBLE_PARAMS:
1588  if (count >= INT_MAX / sizeof(int64_t))
1589  return AVERROR_INVALIDDATA;
1590  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1591  return AVERROR_INVALIDDATA;
1592  dp = av_malloc_array(count, sizeof(double));
1593  if (!dp) {
1594  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1595  goto end;
1596  }
1597  for (i = 0; i < count; i++)
1598  dp[i] = ff_tget_double(&s->gb, s->le);
1599  for (i = 0; i < s->geotag_count; i++) {
1600  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1601  if (s->geotags[i].count == 0
1602  || s->geotags[i].offset + s->geotags[i].count > count) {
1603  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1604  } else if (s->geotags[i].val) {
1605  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1606  } else {
1607  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1608  if (!ap) {
1609  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1610  av_freep(&dp);
1611  return AVERROR(ENOMEM);
1612  }
1613  s->geotags[i].val = ap;
1614  }
1615  }
1616  }
1617  av_freep(&dp);
1618  break;
1619  case TIFF_GEO_ASCII_PARAMS:
1620  pos = bytestream2_tell(&s->gb);
1621  for (i = 0; i < s->geotag_count; i++) {
1622  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1623  if (s->geotags[i].count == 0
1624  || s->geotags[i].offset + s->geotags[i].count > count) {
1625  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1626  } else {
1627  char *ap;
1628 
1629  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1630  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1631  return AVERROR_INVALIDDATA;
1632  if (s->geotags[i].val)
1633  return AVERROR_INVALIDDATA;
1634  ap = av_malloc(s->geotags[i].count);
1635  if (!ap) {
1636  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1637  return AVERROR(ENOMEM);
1638  }
1639  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1640  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1641  s->geotags[i].val = ap;
1642  }
1643  }
1644  }
1645  break;
1646  case TIFF_ARTIST:
1647  ADD_METADATA(count, "artist", NULL);
1648  break;
1649  case TIFF_COPYRIGHT:
1650  ADD_METADATA(count, "copyright", NULL);
1651  break;
1652  case TIFF_DATE:
1653  ADD_METADATA(count, "date", NULL);
1654  break;
1655  case TIFF_DOCUMENT_NAME:
1656  ADD_METADATA(count, "document_name", NULL);
1657  break;
1658  case TIFF_HOST_COMPUTER:
1659  ADD_METADATA(count, "computer", NULL);
1660  break;
1662  ADD_METADATA(count, "description", NULL);
1663  break;
1664  case TIFF_MAKE:
1665  ADD_METADATA(count, "make", NULL);
1666  break;
1667  case TIFF_MODEL:
1668  ADD_METADATA(count, "model", NULL);
1669  break;
1670  case TIFF_PAGE_NAME:
1671  ADD_METADATA(count, "page_name", NULL);
1672  break;
1673  case TIFF_PAGE_NUMBER:
1674  ADD_METADATA(count, "page_number", " / ");
1675  // need to seek back to re-read the page number
1676  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1677  // read the page number
1678  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1679  // get back to where we were before the previous seek
1680  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1681  break;
1682  case TIFF_SOFTWARE_NAME:
1683  ADD_METADATA(count, "software", NULL);
1684  break;
1685  case DNG_VERSION:
1686  if (count == 4) {
1687  unsigned int ver[4];
1688  ver[0] = ff_tget(&s->gb, type, s->le);
1689  ver[1] = ff_tget(&s->gb, type, s->le);
1690  ver[2] = ff_tget(&s->gb, type, s->le);
1691  ver[3] = ff_tget(&s->gb, type, s->le);
1692 
1693  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1694  ver[0], ver[1], ver[2], ver[3]);
1695 
1695 
1696  tiff_set_type(s, TIFF_TYPE_DNG);
1697  }
1698  break;
1699  case CINEMADNG_TIME_CODES:
1700  case CINEMADNG_FRAME_RATE:
1701  case CINEMADNG_T_STOP:
1702  case CINEMADNG_REEL_NAME:
1703  case CINEMADNG_CAMERA_LABEL:
1704  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1705  break;
1706  default:
1707  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1709  "Unknown or unsupported tag %d/0x%0X\n",
1710  tag, tag);
1711  return AVERROR_INVALIDDATA;
1712  }
1713  }
1714 end:
1715  if (s->bpp > 64U) {
1717  "This format is not supported (bpp=%d, %d components)\n",
1718  s->bpp, count);
1719  s->bpp = 0;
1720  return AVERROR_INVALIDDATA;
1721  }
1722  bytestream2_seek(&s->gb, start, SEEK_SET);
1723  return 0;
1724 }
1725 
1726 static int decode_frame(AVCodecContext *avctx,
1727  void *data, int *got_frame, AVPacket *avpkt)
1728 {
1729  TiffContext *const s = avctx->priv_data;
1730  AVFrame *const p = data;
1731  ThreadFrame frame = { .f = data };
1732  unsigned off, last_off;
1733  int le, ret, plane, planes;
1734  int i, j, entries, stride;
1735  unsigned soff, ssize;
1736  uint8_t *dst;
1737  GetByteContext stripsizes;
1738  GetByteContext stripdata;
1739  int retry_for_subifd, retry_for_page;
1740  int is_dng;
1741 
1742  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1743 
1744  // parse image header
1745  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1746  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1747  return ret;
1748  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1749  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1750  return AVERROR_INVALIDDATA;
1751  }
1752  s->le = le;
1753  // TIFF_BPP is not a required tag and defaults to 1
1754 
1756 again:
1757  s->is_thumbnail = 0;
1758  s->bppcount = s->bpp = 1;
1759  s->photometric = TIFF_PHOTOMETRIC_NONE;
1760  s->compr = TIFF_RAW;
1761  s->fill_order = 0;
1762  s->white_level = 0;
1763  s->is_bayer = 0;
1764  s->is_tiled = 0;
1765  s->is_jpeg = 0;
1766  s->cur_page = 0;
1767 
1768  for (i = 0; i < 65536; i++)
1769  s->dng_lut[i] = i;
1770 
1771  free_geotags(s);
1772 
1773  // Reset these offsets so we can tell if they were set this frame
1774  s->stripsizesoff = s->strippos = 0;
1775  /* parse image file directory */
1776  bytestream2_seek(&s->gb, off, SEEK_SET);
1777  entries = ff_tget_short(&s->gb, le);
1778  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1779  return AVERROR_INVALIDDATA;
1780  for (i = 0; i < entries; i++) {
1781  if ((ret = tiff_decode_tag(s, p)) < 0)
1782  return ret;
1783  }
1784 
1785  if (s->get_thumbnail && !s->is_thumbnail) {
1786  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1787  return AVERROR_EOF;
1788  }
1789 
1790  /** whether we should process this IFD's SubIFD */
1791  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1792  /** whether we should process this multi-page IFD's next page */
1793  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1794 
1795  last_off = off;
1796  if (retry_for_page) {
1797  // set offset to the next IFD
1798  off = ff_tget_long(&s->gb, le);
1799  } else if (retry_for_subifd) {
1800  // set offset to the SubIFD
1801  off = s->sub_ifd;
1802  }
1803 
1804  if (retry_for_subifd || retry_for_page) {
1805  if (!off) {
1806  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1807  return AVERROR_INVALIDDATA;
1808  }
1809  if (off <= last_off) {
1810  avpriv_request_sample(s->avctx, "non increasing IFD offset\n");
1811  return AVERROR_INVALIDDATA;
1812  }
1813  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1814  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1815  return AVERROR_INVALIDDATA;
1816  }
1817  s->sub_ifd = 0;
1818  goto again;
1819  }
1820 
1821  /* At this point we've decided on which (Sub)IFD to process */
1822 
1823  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1824 
1825  for (i = 0; i<s->geotag_count; i++) {
1826  const char *keyname = get_geokey_name(s->geotags[i].key);
1827  if (!keyname) {
1828  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1829  continue;
1830  }
1831  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1832  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1833  continue;
1834  }
1835  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1836  if (ret<0) {
1837  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1838  return ret;
1839  }
1840  }
1841 
1842  if (is_dng) {
1843  if (s->white_level == 0)
1844  s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
1845 
1846  if (s->white_level <= s->black_level) {
1847  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1848  s->black_level, s->white_level);
1849  return AVERROR_INVALIDDATA;
1850  }
1851  }
1852 
1853  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1854  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1855  return AVERROR_INVALIDDATA;
1856  }
1857 
1858  /* now we have the data and may start decoding */
1859  if ((ret = init_image(s, &frame)) < 0)
1860  return ret;
1861 
1862  if (!s->is_tiled) {
1863  if (s->strips == 1 && !s->stripsize) {
1864  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1865  s->stripsize = avpkt->size - s->stripoff;
1866  }
1867 
1868  if (s->stripsizesoff) {
1869  if (s->stripsizesoff >= (unsigned)avpkt->size)
1870  return AVERROR_INVALIDDATA;
1871  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1872  avpkt->size - s->stripsizesoff);
1873  }
1874  if (s->strippos) {
1875  if (s->strippos >= (unsigned)avpkt->size)
1876  return AVERROR_INVALIDDATA;
1877  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1878  avpkt->size - s->strippos);
1879  }
1880 
1881  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1882  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1883  return AVERROR_INVALIDDATA;
1884  }
1885  }
1886 
1890  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1892  }
1893 
1894  /* Handle DNG images with JPEG-compressed tiles */
1895 
1896  if (is_dng && s->is_tiled) {
1897  if (!s->is_jpeg) {
1898  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1899  return AVERROR_PATCHWELCOME;
1900  } else if (!s->is_bayer) {
1901  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1902  return AVERROR_PATCHWELCOME;
1903  } else {
1904  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1905  *got_frame = 1;
1906  return ret;
1907  }
1908  }
1909 
1910  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1911 
1912  planes = s->planar ? s->bppcount : 1;
1913  for (plane = 0; plane < planes; plane++) {
1914  uint8_t *five_planes = NULL;
1915  int remaining = avpkt->size;
1916  int decoded_height;
1917  stride = p->linesize[plane];
1918  dst = p->data[plane];
1919  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1920  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1921  stride = stride * 5 / 4;
1922  five_planes =
1923  dst = av_malloc(stride * s->height);
1924  if (!dst)
1925  return AVERROR(ENOMEM);
1926  }
1927  for (i = 0; i < s->height; i += s->rps) {
1928  if (i)
1929  dst += s->rps * stride;
1930  if (s->stripsizesoff)
1931  ssize = ff_tget(&stripsizes, s->sstype, le);
1932  else
1933  ssize = s->stripsize;
1934 
1935  if (s->strippos)
1936  soff = ff_tget(&stripdata, s->sot, le);
1937  else
1938  soff = s->stripoff;
1939 
1940  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
1941  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
1942  av_freep(&five_planes);
1943  return AVERROR_INVALIDDATA;
1944  }
1945  remaining -= ssize;
1946  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
1947  FFMIN(s->rps, s->height - i))) < 0) {
1948  if (avctx->err_recognition & AV_EF_EXPLODE) {
1949  av_freep(&five_planes);
1950  return ret;
1951  }
1952  break;
1953  }
1954  }
1955  decoded_height = FFMIN(i, s->height);
1956 
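 /* TIFF predictor 2 stores horizontal differences: each sample is
    reconstructed by adding the sample one pixel to its left (soff bytes
    back), with 16-bit formats handled in their native byte order. */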
1957  if (s->predictor == 2) {
1958  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1959  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
1960  return AVERROR_PATCHWELCOME;
1961  }
1962  dst = five_planes ? five_planes : p->data[plane];
1963  soff = s->bpp >> 3;
1964  if (s->planar)
1965  soff = FFMAX(soff / s->bppcount, 1);
1966  ssize = s->width * soff;
1967  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
1970  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
1973  for (i = 0; i < decoded_height; i++) {
1974  for (j = soff; j < ssize; j += 2)
1975  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
1976  dst += stride;
1977  }
1978  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1981  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
1984  for (i = 0; i < decoded_height; i++) {
1985  for (j = soff; j < ssize; j += 2)
1986  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
1987  dst += stride;
1988  }
1989  } else {
1990  for (i = 0; i < decoded_height; i++) {
1991  for (j = soff; j < ssize; j++)
1992  dst[j] += dst[j - soff];
1993  dst += stride;
1994  }
1995  }
1996  }
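 /* Predictor 2 (TIFF horizontal differencing) stores each sample as the
  * difference from its left neighbour; the loops above undo it by summing
  * along each row, e.g. stored {10, +2, -1} becomes {10, 12, 11}. */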
1997 
1998  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
1999  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2000  dst = p->data[plane];
2001  for (i = 0; i < s->height; i++) {
2002  for (j = 0; j < stride; j++)
2003  dst[j] = c - dst[j];
2004  dst += stride;
2005  }
2006  }
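 /* WhiteIsZero input stores 0 as white; samples (or palette indices for
  * PAL8) are flipped here to the usual black-is-zero convention. */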
2007 
2008  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2009  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2010  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2011  uint8_t *src = five_planes ? five_planes : p->data[plane];
2012  dst = p->data[plane];
2013  for (i = 0; i < s->height; i++) {
2014  for (j = 0; j < s->width; j++) {
2015  int k = 255 - src[x * j + 3];
2016  int r = (255 - src[x * j ]) * k;
2017  int g = (255 - src[x * j + 1]) * k;
2018  int b = (255 - src[x * j + 2]) * k;
2019  dst[4 * j ] = r * 257 >> 16;
2020  dst[4 * j + 1] = g * 257 >> 16;
2021  dst[4 * j + 2] = b * 257 >> 16;
2022  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2023  }
2024  src += stride;
2025  dst += p->linesize[plane];
2026  }
2027  av_freep(&five_planes);
2028  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2029  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2030  dst = p->data[plane];
2031  for (i = 0; i < s->height; i++) {
2032  for (j = 0; j < s->width; j++) {
2033  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2034  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2035  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2036  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2037  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2038  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2039  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2040  AV_WB16(dst + 8 * j + 6, 65535);
2041  }
2042  dst += p->linesize[plane];
2043  }
2044  }
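 /* The two branches above convert PHOTOMETRIC_SEPARATED (CMYK) data to RGB
  * in place: each channel is inverted and scaled by the inverted K value,
  * at 8-bit and 16-bit depth respectively. */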
2045  }
2046 
2047  if (s->planar && s->bppcount > 2) {
2048  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2049  FFSWAP(int, p->linesize[0], p->linesize[2]);
2050  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2051  FFSWAP(int, p->linesize[0], p->linesize[1]);
2052  }
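 /* Planar TIFF data was decoded in R,G,B plane order; the GBRP/GBRAP pixel
  * formats expect G,B,R, so the plane pointers and line sizes are swapped
  * into that order here. */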
2053 
2054  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2055  uint16_t *dst = (uint16_t *)p->data[0];
2056  for (i = 0; i < s->height; i++) {
2057  for (j = 0; j < s->width; j++)
2058  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2059  dst += stride / 2;
2060  }
2061  }
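 /* For 16-bit Bayer data outside DNG, raw samples are rescaled so the
  * reported white level maps to the full 16-bit range (65535). */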
2062 
2063  *got_frame = 1;
2064 
2065  return avpkt->size;
2066 }
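A minimal sketch of driving this decoder through the public libavcodec API. The helper name decode_tiff_buffer and the assumption that buf holds a complete TIFF file padded with AV_INPUT_BUFFER_PADDING_SIZE zero bytes are illustrative, not part of tiff.c; error handling is omitted for brevity.

 #include <libavcodec/avcodec.h>

 /* Illustrative sketch: decode one in-memory TIFF file.
  * buf is assumed to be zero-padded by AV_INPUT_BUFFER_PADDING_SIZE bytes. */
 static AVFrame *decode_tiff_buffer(const uint8_t *buf, int buf_size)
 {
     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_TIFF);
     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
     AVPacket *pkt        = av_packet_alloc();
     AVFrame *frame       = av_frame_alloc();

     avcodec_open2(ctx, codec, NULL);
     pkt->data = (uint8_t *)buf;        /* image codecs take the whole file as one packet */
     pkt->size = buf_size;
     avcodec_send_packet(ctx, pkt);
     avcodec_receive_frame(ctx, frame); /* pixels now in frame->data[] / frame->linesize[] */

     av_packet_free(&pkt);
     avcodec_free_context(&ctx);
     return frame;                      /* caller frees with av_frame_free() */
 }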
2067 
2068 static av_cold int tiff_init(AVCodecContext *avctx)
2069 {
2070  TiffContext *s = avctx->priv_data;
2071  const AVCodec *codec;
2072  int ret;
2073 
2074  s->width = 0;
2075  s->height = 0;
2076  s->subsampling[0] =
2077  s->subsampling[1] = 1;
2078  s->avctx = avctx;
2079  ff_lzw_decode_open(&s->lzw);
2080  if (!s->lzw)
2081  return AVERROR(ENOMEM);
2082  ff_ccitt_unpack_init();
2083 
2084  /* Allocate JPEG frame */
2085  s->jpgframe = av_frame_alloc();
2086  if (!s->jpgframe)
2087  return AVERROR(ENOMEM);
2088 
2089  /* Prepare everything needed for JPEG decoding */
2090  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2091  if (!codec)
2092  return AVERROR_BUG;
2093  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2094  if (!s->avctx_mjpeg)
2095  return AVERROR(ENOMEM);
2096  s->avctx_mjpeg->flags = avctx->flags;
2097  s->avctx_mjpeg->flags2 = avctx->flags2;
2098  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2099  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2100  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2101  if (ret < 0) {
2102  return ret;
2103  }
2104 
2105  return 0;
2106 }
2107 
2108 static av_cold int tiff_end(AVCodecContext *avctx)
2109 {
2110  TiffContext *const s = avctx->priv_data;
2111 
2112  free_geotags(s);
2113 
2114  ff_lzw_decode_close(&s->lzw);
2115  av_freep(&s->deinvert_buf);
2116  s->deinvert_buf_size = 0;
2117  av_freep(&s->yuv_line);
2118  s->yuv_line_size = 0;
2119  av_freep(&s->fax_buffer);
2120  s->fax_buffer_size = 0;
2121  av_frame_free(&s->jpgframe);
2122  avcodec_free_context(&s->avctx_mjpeg);
2123  return 0;
2124 }
2125 
2126 #define OFFSET(x) offsetof(TiffContext, x)
2127 static const AVOption tiff_options[] = {
2128  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2129  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2130  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2131  { NULL },
2132 };
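The private options above are applied through the options dictionary passed to avcodec_open2(). A short sketch, with ctx and codec as in the decoding sketch earlier (names illustrative):

 AVDictionary *opts = NULL;
 av_dict_set(&opts, "page", "2", 0);      /* decode the second page of a multi-page TIFF */
 av_dict_set(&opts, "thumbnail", "1", 0); /* or prefer an embedded thumbnail subimage */
 avcodec_open2(ctx, codec, &opts);
 av_dict_free(&opts);                     /* entries still in opts were not recognized */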
2133 
2134 static const AVClass tiff_decoder_class = {
2135  .class_name = "TIFF decoder",
2136  .item_name = av_default_item_name,
2137  .option = tiff_options,
2138  .version = LIBAVUTIL_VERSION_INT,
2139 };
2140 
2141 AVCodec ff_tiff_decoder = {
2142  .name = "tiff",
2143  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2144  .type = AVMEDIA_TYPE_VIDEO,
2145  .id = AV_CODEC_ID_TIFF,
2146  .priv_data_size = sizeof(TiffContext),
2147  .init = tiff_init,
2148  .close = tiff_end,
2149  .decode = decode_frame,
2150  .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
2151  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2152  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2153  .priv_class = &tiff_decoder_class,
2154 };