FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
56  AVCodecContext *avctx;
57  GetByteContext gb;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62 
63  int get_subimage;
64  uint16_t get_page;
65  int get_thumbnail;
66 
67  enum TiffType tiff_type;
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
71  int palette_is_set;
72  int le;
73  enum TiffCompr compr;
74  enum TiffPhotometric photometric;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
79  int fill_order;
80  uint32_t res[4];
81  int is_thumbnail;
82 
83  int is_bayer;
84  uint8_t pattern[4];
85  unsigned black_level;
86  unsigned white_level;
87  uint16_t dng_lut[65536];
88 
89  uint32_t sub_ifd;
90  uint16_t cur_page;
91 
92  int strips, rps, sstype;
93  int sot;
94  int stripsizesoff, stripsize, stripoff, strippos;
95  LZWState *lzw;
96 
97  /* Tile support */
98  int is_tiled;
99  int tile_byte_counts_offset, tile_offsets_offset;
100  int tile_width, tile_length;
101  int tile_count;
102 
103  int is_jpeg;
104 
105  uint8_t *deinvert_buf;
106  int deinvert_buf_size;
107  uint8_t *yuv_line;
108  unsigned int yuv_line_size;
109  uint8_t *fax_buffer;
110  unsigned int fax_buffer_size;
111 
112  int geotag_count;
113  TiffGeoTag *geotags;
114 } TiffContext;
115 
116 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
117  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
118  s->tiff_type = tiff_type;
119 }
120 
121 static void free_geotags(TiffContext *const s)
122 {
123  int i;
124  for (i = 0; i < s->geotag_count; i++) {
125  if (s->geotags[i].val)
126  av_freep(&s->geotags[i].val);
127  }
128  av_freep(&s->geotags);
129  s->geotag_count = 0;
130 }
131 
132 #define RET_GEOKEY(TYPE, array, element)\
133  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
135  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
136 
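/* For example, with key = TIFF_PROJ_KEY_ID_OFFSET + 3, RET_GEOKEY(PROJ, proj, name)
 * range-checks the key against ff_tiff_proj_name_type_map and returns the .name of
 * entry 3; keys outside every known range drop through to the function's default
 * return value below. */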
137 static const char *get_geokey_name(int key)
138 {
139  RET_GEOKEY(VERT, vert, name);
140  RET_GEOKEY(PROJ, proj, name);
141  RET_GEOKEY(GEOG, geog, name);
142  RET_GEOKEY(CONF, conf, name);
143 
144  return NULL;
145 }
146 
147 static int get_geokey_type(int key)
148 {
149  RET_GEOKEY(VERT, vert, type);
150  RET_GEOKEY(PROJ, proj, type);
151  RET_GEOKEY(GEOG, geog, type);
152  RET_GEOKEY(CONF, conf, type);
153 
154  return AVERROR_INVALIDDATA;
155 }
156 
157 static int cmp_id_key(const void *id, const void *k)
158 {
159  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
160 }
161 
162 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
163 {
164  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
165  if(r)
166  return r->name;
167 
168  return NULL;
169 }
170 
171 static char *get_geokey_val(int key, int val)
172 {
173  char *ap;
174 
175  if (val == TIFF_GEO_KEY_UNDEFINED)
176  return av_strdup("undefined");
177  if (val == TIFF_GEO_KEY_USER_DEFINED)
178  return av_strdup("User-Defined");
179 
180 #define RET_GEOKEY_VAL(TYPE, array)\
181  if (val >= TIFF_##TYPE##_OFFSET &&\
182  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
183  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
184 
185  switch (key) {
187  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
188  break;
190  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
191  break;
195  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
196  break;
199  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
200  break;
202  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
203  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
204  break;
206  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
207  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
208  break;
210  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
211  break;
213  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
214  break;
217  if(ap) return ap;
218  break;
221  if(ap) return ap;
222  break;
224  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
225  break;
227  RET_GEOKEY_VAL(VERT_CS, vert_cs);
228  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
229  break;
230 
231  }
232 
233  ap = av_malloc(14);
234  if (ap)
235  snprintf(ap, 14, "Unknown-%d", val);
236  return ap;
237 }
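/* Values not matched above fall through to the 14-byte fallback buffer: e.g. val = 1234
 * yields "Unknown-1234" ("Unknown-" is 8 characters, leaving room for 5 digits plus the
 * terminating NUL; longer values are safely truncated by snprintf). */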
238 
239 static char *doubles2str(double *dp, int count, const char *sep)
240 {
241  int i;
242  char *ap, *ap0;
243  uint64_t component_len;
244  if (!sep) sep = ", ";
245  component_len = 24LL + strlen(sep);
246  if (count >= (INT_MAX - 1)/component_len)
247  return NULL;
248  ap = av_malloc(component_len * count + 1);
249  if (!ap)
250  return NULL;
251  ap0 = ap;
252  ap[0] = '\0';
253  for (i = 0; i < count; i++) {
254  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255  if(l >= component_len) {
256  av_free(ap0);
257  return NULL;
258  }
259  ap += l;
260  }
261  ap0[strlen(ap0) - strlen(sep)] = '\0';
262  return ap0;
263 }
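/* Example: doubles2str((double[]){1.0, 2.5}, 2, NULL) reserves 24 bytes plus separator
 * length per component, builds "1, 2.5, " with the default ", " separator, then chops
 * the trailing separator and returns "1, 2.5". */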
264 
265 static int add_metadata(int count, int type,
266  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
267 {
268  switch(type) {
269  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
270  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
271  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
272  default : return AVERROR_INVALIDDATA;
273  };
274 }
275 
276 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
277  const uint8_t *src, int src_stride, int width, int height,
278  int is_single_comp, int is_u16);
279 
280 static void av_always_inline horizontal_fill(TiffContext *s,
281  unsigned int bpp, uint8_t* dst,
282  int usePtr, const uint8_t *src,
283  uint8_t c, int width, int offset)
284 {
285  switch (bpp) {
286  case 1:
287  while (--width >= 0) {
288  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
289  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
290  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
291  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
292  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
293  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
294  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
295  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
296  }
297  break;
298  case 2:
299  while (--width >= 0) {
300  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
301  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
302  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
303  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
304  }
305  break;
306  case 4:
307  while (--width >= 0) {
308  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
309  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
310  }
311  break;
312  case 10:
313  case 12:
314  case 14: {
315  uint16_t *dst16 = (uint16_t *)dst;
316  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
317  uint8_t shift = is_dng ? 0 : 16 - bpp;
318  GetBitContext gb;
319 
320  init_get_bits8(&gb, src, width);
321  for (int i = 0; i < s->width; i++) {
322  dst16[i] = get_bits(&gb, bpp) << shift;
323  }
324  }
325  break;
326  default:
327  if (usePtr) {
328  memcpy(dst + offset, src, width);
329  } else {
330  memset(dst + offset, c, width);
331  }
332  }
333 }
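/* Example: with bpp = 4 each source byte is split into two palette indices, so
 * src[0] = 0xAB is written out as 0x0A then 0x0B (high nibble first); for
 * bpp = 10/12/14 the samples are left-shifted into 16-bit range unless the file
 * is a DNG, whose raw values are kept unshifted for later linearization. */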
334 
335 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
336 {
337  int i;
338 
339  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
340  if (!s->deinvert_buf)
341  return AVERROR(ENOMEM);
342  for (i = 0; i < size; i++)
343  s->deinvert_buf[i] = ff_reverse[src[i]];
344 
345  return 0;
346 }
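/* ff_reverse[] mirrors the bit order of each byte (e.g. 0x01 -> 0x80, 0xB0 -> 0x0D),
 * converting FillOrder == 2 (LSB-first) data into the MSB-first layout the unpacking
 * code expects. */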
347 
348 static void unpack_gray(TiffContext *s, AVFrame *p,
349  const uint8_t *src, int lnum, int width, int bpp)
350 {
351  GetBitContext gb;
352  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
353 
354  init_get_bits8(&gb, src, width);
355 
356  for (int i = 0; i < s->width; i++) {
357  dst[i] = get_bits(&gb, bpp);
358  }
359 }
360 
361 static void unpack_yuv(TiffContext *s, AVFrame *p,
362  const uint8_t *src, int lnum)
363 {
364  int i, j, k;
365  int w = (s->width - 1) / s->subsampling[0] + 1;
366  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
367  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
368  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
369  for (i = 0; i < w; i++) {
370  for (j = 0; j < s->subsampling[1]; j++)
371  for (k = 0; k < s->subsampling[0]; k++)
372  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
373  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
374  *pu++ = *src++;
375  *pv++ = *src++;
376  }
377  }else{
378  for (i = 0; i < w; i++) {
379  for (j = 0; j < s->subsampling[1]; j++)
380  for (k = 0; k < s->subsampling[0]; k++)
381  p->data[0][(lnum + j) * p->linesize[0] +
382  i * s->subsampling[0] + k] = *src++;
383  *pu++ = *src++;
384  *pv++ = *src++;
385  }
386  }
387 }
388 
389 #if CONFIG_ZLIB
390 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
391  int size)
392 {
393  z_stream zstream = { 0 };
394  int zret;
395 
396  zstream.next_in = (uint8_t *)src;
397  zstream.avail_in = size;
398  zstream.next_out = dst;
399  zstream.avail_out = *len;
400  zret = inflateInit(&zstream);
401  if (zret != Z_OK) {
402  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
403  return zret;
404  }
405  zret = inflate(&zstream, Z_SYNC_FLUSH);
406  inflateEnd(&zstream);
407  *len = zstream.total_out;
408  return zret == Z_STREAM_END ? Z_OK : zret;
409 }
410 
411 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
412  const uint8_t *src, int size, int width, int lines,
413  int strip_start, int is_yuv)
414 {
415  uint8_t *zbuf;
416  unsigned long outlen;
417  int ret, line;
418  outlen = width * lines;
419  zbuf = av_malloc(outlen);
420  if (!zbuf)
421  return AVERROR(ENOMEM);
422  if (s->fill_order) {
423  if ((ret = deinvert_buffer(s, src, size)) < 0) {
424  av_free(zbuf);
425  return ret;
426  }
427  src = s->deinvert_buf;
428  }
429  ret = tiff_uncompress(zbuf, &outlen, src, size);
430  if (ret != Z_OK) {
432  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
433  (unsigned long)width * lines, ret);
434  av_free(zbuf);
435  return AVERROR_UNKNOWN;
436  }
437  src = zbuf;
438  for (line = 0; line < lines; line++) {
439  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
440  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
441  } else {
442  memcpy(dst, src, width);
443  }
444  if (is_yuv) {
445  unpack_yuv(s, p, dst, strip_start + line);
446  line += s->subsampling[1] - 1;
447  }
448  dst += stride;
449  src += width;
450  }
451  av_free(zbuf);
452  return 0;
453 }
454 #endif
455 
456 #if CONFIG_LZMA
457 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
458  int size)
459 {
460  lzma_stream stream = LZMA_STREAM_INIT;
461  lzma_ret ret;
462 
463  stream.next_in = (uint8_t *)src;
464  stream.avail_in = size;
465  stream.next_out = dst;
466  stream.avail_out = *len;
467  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
468  if (ret != LZMA_OK) {
469  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
470  return ret;
471  }
472  ret = lzma_code(&stream, LZMA_RUN);
473  lzma_end(&stream);
474  *len = stream.total_out;
475  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
476 }
477 
478 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
479  const uint8_t *src, int size, int width, int lines,
480  int strip_start, int is_yuv)
481 {
482  uint64_t outlen = width * (uint64_t)lines;
483  int ret, line;
484  uint8_t *buf = av_malloc(outlen);
485  if (!buf)
486  return AVERROR(ENOMEM);
487  if (s->fill_order) {
488  if ((ret = deinvert_buffer(s, src, size)) < 0) {
489  av_free(buf);
490  return ret;
491  }
492  src = s->deinvert_buf;
493  }
494  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
495  if (ret != LZMA_OK) {
497  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
498  (uint64_t)width * lines, ret);
499  av_free(buf);
500  return AVERROR_UNKNOWN;
501  }
502  src = buf;
503  for (line = 0; line < lines; line++) {
504  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
505  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
506  } else {
507  memcpy(dst, src, width);
508  }
509  if (is_yuv) {
510  unpack_yuv(s, p, dst, strip_start + line);
511  line += s->subsampling[1] - 1;
512  }
513  dst += stride;
514  src += width;
515  }
516  av_free(buf);
517  return 0;
518 }
519 #endif
520 
521 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
522  const uint8_t *src, int size, int width, int lines)
523 {
524  int i, ret = 0;
525  int line;
526  uint8_t *src2;
527 
529  src2 = s->fax_buffer;
530 
531  if (!src2) {
533  "Error allocating temporary buffer\n");
534  return AVERROR(ENOMEM);
535  }
536 
537  if (!s->fill_order) {
538  memcpy(src2, src, size);
539  } else {
540  for (i = 0; i < size; i++)
541  src2[i] = ff_reverse[src[i]];
542  }
543  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
544  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
545  s->compr, s->fax_opts);
546  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
547  for (line = 0; line < lines; line++) {
548  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
549  dst += stride;
550  }
551  return ret;
552 }
553 
554 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
555 
556 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
557  const uint8_t *src, int size, int strip_start, int lines)
558 {
559  PutByteContext pb;
560  int c, line, pixels, code, ret;
561  const uint8_t *ssrc = src;
562  int width = ((s->width * s->bpp) + 7) >> 3;
563  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
564  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
565  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
566  desc->nb_components >= 3;
567  int is_dng;
568 
569  if (s->planar)
570  width /= s->bppcount;
571 
572  if (size <= 0)
573  return AVERROR_INVALIDDATA;
574 
575  if (is_yuv) {
576  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
577  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
578  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
579  if (s->yuv_line == NULL) {
580  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
581  return AVERROR(ENOMEM);
582  }
583  dst = s->yuv_line;
584  stride = 0;
585 
586  width = (s->width - 1) / s->subsampling[0] + 1;
587  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
588  av_assert0(width <= bytes_per_row);
589  av_assert0(s->bpp == 24);
590  }
591  if (s->is_bayer) {
592  width = (s->bpp * s->width + 7) >> 3;
593  }
594  if (p->format == AV_PIX_FMT_GRAY12) {
596  if (s->yuv_line == NULL) {
597  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
598  return AVERROR(ENOMEM);
599  }
600  dst = s->yuv_line;
601  stride = 0;
602  }
603 
604  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
605 #if CONFIG_ZLIB
606  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
607  strip_start, is_yuv);
608 #else
610  "zlib support not enabled, "
611  "deflate compression not supported\n");
612  return AVERROR(ENOSYS);
613 #endif
614  }
615  if (s->compr == TIFF_LZMA) {
616 #if CONFIG_LZMA
617  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
618  strip_start, is_yuv);
619 #else
621  "LZMA support not enabled\n");
622  return AVERROR(ENOSYS);
623 #endif
624  }
625  if (s->compr == TIFF_LZW) {
626  if (s->fill_order) {
627  if ((ret = deinvert_buffer(s, src, size)) < 0)
628  return ret;
629  ssrc = src = s->deinvert_buf;
630  }
631  if (size > 1 && !src[0] && (src[1]&1)) {
632  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
633  }
634  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
635  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
636  return ret;
637  }
638  for (line = 0; line < lines; line++) {
639  pixels = ff_lzw_decode(s->lzw, dst, width);
640  if (pixels < width) {
641  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
642  pixels, width);
643  return AVERROR_INVALIDDATA;
644  }
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
647  if (is_yuv) {
648  unpack_yuv(s, p, dst, strip_start + line);
649  line += s->subsampling[1] - 1;
650  } else if (p->format == AV_PIX_FMT_GRAY12) {
651  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
652  }
653  dst += stride;
654  }
655  return 0;
656  }
657  if (s->compr == TIFF_CCITT_RLE ||
658  s->compr == TIFF_G3 ||
659  s->compr == TIFF_G4) {
660  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
661  return AVERROR_INVALIDDATA;
662 
663  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
664  }
665 
666  bytestream2_init(&s->gb, src, size);
667  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
668 
669  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
670 
671  /* Decode JPEG-encoded DNGs with strips */
672  if (s->compr == TIFF_NEWJPEG && is_dng) {
673  if (s->strips > 1) {
674  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strip is unsupported\n");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
678  return ret;
679  return 0;
680  }
681 
682  for (line = 0; line < lines; line++) {
683  if (src - ssrc > size) {
684  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
685  return AVERROR_INVALIDDATA;
686  }
687 
688  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
689  break;
690  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
691  switch (s->compr) {
692  case TIFF_RAW:
693  if (ssrc + size - src < width)
694  return AVERROR_INVALIDDATA;
695 
696  if (!s->fill_order) {
697  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
698  dst, 1, src, 0, width, 0);
699  } else {
700  int i;
701  for (i = 0; i < width; i++)
702  dst[i] = ff_reverse[src[i]];
703  }
704 
705  /* Color processing for DNG images with uncompressed strips (non-tiled) */
706  if (is_dng) {
707  int is_u16, pixel_size_bytes, pixel_size_bits;
708 
709  is_u16 = (s->bpp > 8);
710  pixel_size_bits = (is_u16 ? 16 : 8);
711  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
712 
713  dng_blit(s,
714  dst,
715  0, // no stride, only 1 line
716  dst,
717  0, // no stride, only 1 line
718  width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount, // need to account for [1, 16] bpp
719  1,
720  0, // single-component variation is only present in JPEG-encoded DNGs
721  is_u16);
722  }
723 
724  src += width;
725  break;
726  case TIFF_PACKBITS:
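 /* PackBits: a non-negative code n copies n+1 literal bytes, a code in -127..-1
  * repeats the next byte (-n)+1 times, and -128 is a no-op; e.g. the pair
  * 0xFE 0x55 expands to three 0x55 bytes. */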
727  for (pixels = 0; pixels < width;) {
728  if (ssrc + size - src < 2) {
729  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
730  return AVERROR_INVALIDDATA;
731  }
732  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
733  if (code >= 0) {
734  code++;
735  if (pixels + code > width ||
736  ssrc + size - src < code) {
738  "Copy went out of bounds\n");
739  return AVERROR_INVALIDDATA;
740  }
742  dst, 1, src, 0, code, pixels);
743  src += code;
744  pixels += code;
745  } else if (code != -128) { // -127..-1
746  code = (-code) + 1;
747  if (pixels + code > width) {
749  "Run went out of bounds\n");
750  return AVERROR_INVALIDDATA;
751  }
752  c = *src++;
754  dst, 0, NULL, c, code, pixels);
755  pixels += code;
756  }
757  }
758  if (s->fill_order) {
759  int i;
760  for (i = 0; i < width; i++)
761  dst[i] = ff_reverse[dst[i]];
762  }
763  break;
764  }
765  if (is_yuv) {
766  unpack_yuv(s, p, dst, strip_start + line);
767  line += s->subsampling[1] - 1;
768  } else if (p->format == AV_PIX_FMT_GRAY12) {
769  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
770  }
771  dst += stride;
772  }
773  return 0;
774 }
775 
776 /**
777  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
778  */
779 static uint16_t av_always_inline dng_process_color16(uint16_t value,
780  const uint16_t *lut,
781  uint16_t black_level,
782  float scale_factor) {
783  float value_norm;
784 
785  // Apply the lookup table, if present
786  if (lut)
787  value = lut[value];
788 
789  // Black level subtraction
790  value = av_clip_uint16_c((unsigned)value - black_level);
791 
792  // Color scaling
793  value_norm = (float)value * scale_factor;
794 
795  value = av_clip_uint16_c(value_norm * 65535);
796 
797  return value;
798 }
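/* Worked example: with black_level = 256 and white_level = 4095 the caller passes
 * scale_factor = 1/3839, so a raw value of 2175 becomes (2175 - 256) / 3839 ~= 0.4999
 * of full scale, i.e. roughly 32759 after rescaling to 16 bits. */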
799 
800 static uint16_t av_always_inline dng_process_color8(uint16_t value,
801  const uint16_t *lut,
802  uint16_t black_level,
803  float scale_factor) {
804  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
805 }
806 
807 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
808  const uint8_t *src, int src_stride,
809  int width, int height, int is_single_comp, int is_u16)
810 {
811  int line, col;
812  float scale_factor;
813 
814  scale_factor = 1.0f / (s->white_level - s->black_level);
815 
816  if (is_single_comp) {
817  if (!is_u16)
818  return; /* <= 8bpp unsupported */
819 
820  /* Image is double the width and half the height we need; each row comprises 2 rows of the output
821  (split vertically in the middle). */
822  for (line = 0; line < height / 2; line++) {
823  uint16_t *dst_u16 = (uint16_t *)dst;
824  uint16_t *src_u16 = (uint16_t *)src;
825 
826  /* Blit first half of the input row to the initial row of output */
827  for (col = 0; col < width; col++)
828  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
829 
830  /* Advance the destination pointer by a row (source pointer remains in the same place) */
831  dst += dst_stride * sizeof(uint16_t);
832  dst_u16 = (uint16_t *)dst;
833 
834  /* Blit second half of the input row to the next row of output */
835  for (col = 0; col < width; col++)
836  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
837 
838  dst += dst_stride * sizeof(uint16_t);
839  src += src_stride * sizeof(uint16_t);
840  }
841  } else {
842  /* Input and output image are the same size and the MJpeg decoder has done per-component
843  deinterleaving, so blitting here is straightforward. */
844  if (is_u16) {
845  for (line = 0; line < height; line++) {
846  uint16_t *dst_u16 = (uint16_t *)dst;
847  uint16_t *src_u16 = (uint16_t *)src;
848 
849  for (col = 0; col < width; col++)
850  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
851 
852  dst += dst_stride * sizeof(uint16_t);
853  src += src_stride * sizeof(uint16_t);
854  }
855  } else {
856  for (line = 0; line < height; line++) {
857  for (col = 0; col < width; col++)
858  *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
859 
860  dst += dst_stride;
861  src += src_stride;
862  }
863  }
864  }
865 }
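/* Note on the single-component path: a bayer DNG tile that should be w x h pixels is
 * handed back by the MJPEG decoder as a (2*w) x (h/2) single-component image (see the
 * matching check in dng_decode_jpeg), so each source row above is split into two
 * consecutive destination rows; e.g. a 256x256 tile arrives as 512x128. */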
866 
867 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
868  int tile_byte_count, int dst_x, int dst_y, int w, int h)
869 {
870  TiffContext *s = avctx->priv_data;
871  AVPacket jpkt;
872  uint8_t *dst_data, *src_data;
873  uint32_t dst_offset; /* offset from dst buffer in pixels */
874  int is_single_comp, is_u16, pixel_size;
875  int ret;
876 
877  /* Prepare a packet and send to the MJPEG decoder */
878  av_init_packet(&jpkt);
879  jpkt.data = (uint8_t*)s->gb.buffer;
880  jpkt.size = tile_byte_count;
881 
882  if (s->is_bayer) {
883  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
884  /* We have to set this information here, since there is no way to know if a given JPEG is a DNG-embedded
885  image or not from its own data (and we need that information when decoding it). */
886  mjpegdecctx->bayer = 1;
887  }
888 
889  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
890  if (ret < 0) {
891  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
892  return ret;
893  }
894 
895  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
896  if (ret < 0) {
897  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
898 
899  /* Normally skip, error if explode */
900  if (avctx->err_recognition & AV_EF_EXPLODE)
901  return AVERROR_INVALIDDATA;
902  else
903  return 0;
904  }
905 
906  /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
907 
908  /* See dng_blit for explanation */
909  is_single_comp = (s->avctx_mjpeg->width == w * 2 && s->avctx_mjpeg->height == h / 2);
910 
911  is_u16 = (s->bpp > 8);
912  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
913 
914  if (is_single_comp && !is_u16) {
915  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
917  return AVERROR_PATCHWELCOME;
918  }
919 
920  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
921  dst_data = frame->data[0] + dst_offset * pixel_size;
922  src_data = s->jpgframe->data[0];
923 
924  dng_blit(s,
925  dst_data,
926  frame->linesize[0] / pixel_size,
927  src_data,
928  s->jpgframe->linesize[0] / pixel_size,
929  w,
930  h,
931  is_single_comp,
932  is_u16);
933 
934  av_frame_unref(s->jpgframe);
935 
936  return 0;
937 }
938 
939 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
940 {
941  TiffContext *s = avctx->priv_data;
942  int tile_idx;
943  int tile_offset_offset, tile_offset;
944  int tile_byte_count_offset, tile_byte_count;
945  int tile_count_x, tile_count_y;
946  int tile_width, tile_length;
947  int has_width_leftover, has_height_leftover;
948  int tile_x = 0, tile_y = 0;
949  int pos_x = 0, pos_y = 0;
950  int ret;
951 
952  s->jpgframe->width = s->tile_width;
953  s->jpgframe->height = s->tile_length;
954 
955  s->avctx_mjpeg->width = s->tile_width;
956  s->avctx_mjpeg->height = s->tile_length;
957 
958  has_width_leftover = (s->width % s->tile_width != 0);
959  has_height_leftover = (s->height % s->tile_length != 0);
960 
961  /* Calculate tile counts (round up) */
962  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
963  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
964 
965  /* Iterate over the number of tiles */
966  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
967  tile_x = tile_idx % tile_count_x;
968  tile_y = tile_idx / tile_count_x;
969 
970  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
971  tile_width = s->width % s->tile_width;
972  else
973  tile_width = s->tile_width;
974 
975  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
976  tile_length = s->height % s->tile_length;
977  else
978  tile_length = s->tile_length;
979 
980  /* Read tile offset */
981  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
982  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
983  tile_offset = ff_tget_long(&s->gb, s->le);
984 
985  /* Read tile byte size */
986  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
987  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
988  tile_byte_count = ff_tget_long(&s->gb, s->le);
989 
990  /* Seek to tile data */
991  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
992 
993  /* Decode JPEG tile and copy it in the reference frame */
994  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
995 
996  if (ret < 0)
997  return ret;
998 
999  /* Advance current positions */
1000  pos_x += tile_width;
1001  if (tile_x == tile_count_x - 1) { // If on the right edge
1002  pos_x = 0;
1003  pos_y += tile_length;
1004  }
1005  }
1006 
1007  /* Frame is ready to be output */
1008  frame->pict_type = AV_PICTURE_TYPE_I;
1009  frame->key_frame = 1;
1010 
1011  return avpkt->size;
1012 }
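/* Tile geometry example: for a 5000-pixel-wide image with tile_width = 256,
 * tile_count_x = (5000 + 255) / 256 = 20 and the right-most column of tiles is only
 * 5000 % 256 = 136 pixels wide; the bottom row is trimmed the same way. */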
1013 
1014 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1015 {
1016  TiffContext *s = avctx->priv_data;
1017 
1018  s->jpgframe->width = s->width;
1019  s->jpgframe->height = s->height;
1020 
1021  s->avctx_mjpeg->width = s->width;
1022  s->avctx_mjpeg->height = s->height;
1023 
1024  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1025 }
1026 
1027 static int init_image(TiffContext *s, ThreadFrame *frame)
1028 {
1029  int ret;
1030  int create_gray_palette = 0;
1031 
1032  // make sure there is no aliasing in the following switch
1033  if (s->bpp >= 100 || s->bppcount >= 10) {
1035  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1036  s->bpp, s->bppcount);
1037  return AVERROR_INVALIDDATA;
1038  }
1039 
1040  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1041  case 11:
1042  if (!s->palette_is_set) {
1044  break;
1045  }
1046  case 21:
1047  case 41:
1049  if (!s->palette_is_set) {
1050  create_gray_palette = 1;
1051  }
1052  break;
1053  case 81:
1055  break;
1056  case 121:
1058  break;
1059  case 10081:
1060  switch (AV_RL32(s->pattern)) {
1061  case 0x02010100:
1063  break;
1064  case 0x00010102:
1066  break;
1067  case 0x01000201:
1069  break;
1070  case 0x01020001:
1072  break;
1073  default:
1074  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1075  AV_RL32(s->pattern));
1076  return AVERROR_PATCHWELCOME;
1077  }
1078  break;
1079  case 10101:
1080  case 10121:
1081  case 10141:
1082  case 10161:
1083  switch (AV_RL32(s->pattern)) {
1084  case 0x02010100:
1086  break;
1087  case 0x00010102:
1089  break;
1090  case 0x01000201:
1092  break;
1093  case 0x01020001:
1095  break;
1096  default:
1097  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1098  AV_RL32(s->pattern));
1099  return AVERROR_PATCHWELCOME;
1100  }
1101  break;
1102  case 243:
1103  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1104  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1106  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1108  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1110  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1112  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1114  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1116  } else {
1117  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1118  return AVERROR_PATCHWELCOME;
1119  }
1120  } else
1122  break;
1123  case 161:
1125  break;
1126  case 162:
1128  break;
1129  case 322:
1131  break;
1132  case 324:
1134  break;
1135  case 405:
1138  else {
1140  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1141  return AVERROR_PATCHWELCOME;
1142  }
1143  break;
1144  case 483:
1146  break;
1147  case 644:
1149  break;
1150  case 1243:
1152  break;
1153  case 1324:
1155  break;
1156  case 1483:
1158  break;
1159  case 1644:
1161  break;
1162  default:
1164  "This format is not supported (bpp=%d, bppcount=%d)\n",
1165  s->bpp, s->bppcount);
1166  return AVERROR_INVALIDDATA;
1167  }
1168 
1169  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1170  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1171  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1172  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1173  desc->nb_components < 3) {
1174  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1175  return AVERROR_INVALIDDATA;
1176  }
1177  }
1178 
1179  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1180  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1181  if (ret < 0)
1182  return ret;
1183  }
1184  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1185  return ret;
1186  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1187  if (!create_gray_palette)
1188  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1189  else {
1190  /* make default grayscale pal */
1191  int i;
1192  uint32_t *pal = (uint32_t *)frame->f->data[1];
1193  for (i = 0; i < 1<<s->bpp; i++)
1194  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1195  }
1196  }
1197  return 0;
1198 }
1199 
1200 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1201 {
1202  int offset = tag == TIFF_YRES ? 2 : 0;
1203  s->res[offset++] = num;
1204  s->res[offset] = den;
1205  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1206  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1207  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1208  if (num > INT64_MAX || den > INT64_MAX) {
1209  num = num >> 1;
1210  den = den >> 1;
1211  }
1212  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1213  num, den, INT32_MAX);
1214  if (!s->avctx->sample_aspect_ratio.den)
1215  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1216  }
1217 }
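/* Example: XRes = 300/1 and YRes = 150/1 give res[] = {300, 1, 150, 1}, so the sample
 * aspect ratio reduces to (150*1):(300*1) = 1:2, i.e. each pixel is displayed twice as
 * tall as it is wide. */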
1218 
1219 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1220 {
1221  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1222  int i, start;
1223  int pos;
1224  int ret;
1225  double *dp;
1226 
1227  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1228  if (ret < 0) {
1229  goto end;
1230  }
1231 
1232  off = bytestream2_tell(&s->gb);
1233  if (count == 1) {
1234  switch (type) {
1235  case TIFF_BYTE:
1236  case TIFF_SHORT:
1237  case TIFF_LONG:
1238  value = ff_tget(&s->gb, type, s->le);
1239  break;
1240  case TIFF_RATIONAL:
1241  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1242  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1243  break;
1244  case TIFF_STRING:
1245  if (count <= 4) {
1246  break;
1247  }
1248  default:
1249  value = UINT_MAX;
1250  }
1251  }
1252 
1253  switch (tag) {
1254  case TIFF_SUBFILE:
1255  s->is_thumbnail = (value != 0);
1256  break;
1257  case TIFF_WIDTH:
1258  s->width = value;
1259  break;
1260  case TIFF_HEIGHT:
1261  s->height = value;
1262  break;
1263  case TIFF_BPP:
1264  if (count > 5U) {
1266  "This format is not supported (bpp=%d, %d components)\n",
1267  value, count);
1268  return AVERROR_INVALIDDATA;
1269  }
1270  s->bppcount = count;
1271  if (count == 1)
1272  s->bpp = value;
1273  else {
1274  switch (type) {
1275  case TIFF_BYTE:
1276  case TIFF_SHORT:
1277  case TIFF_LONG:
1278  s->bpp = 0;
1279  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1280  return AVERROR_INVALIDDATA;
1281  for (i = 0; i < count; i++)
1282  s->bpp += ff_tget(&s->gb, type, s->le);
1283  break;
1284  default:
1285  s->bpp = -1;
1286  }
1287  }
1288  break;
1290  if (count != 1) {
1292  "Samples per pixel requires a single value, many provided\n");
1293  return AVERROR_INVALIDDATA;
1294  }
1295  if (value > 5U) {
1297  "Samples per pixel %d is too large\n", value);
1298  return AVERROR_INVALIDDATA;
1299  }
1300  if (s->bppcount == 1)
1301  s->bpp *= value;
1302  s->bppcount = value;
1303  break;
1304  case TIFF_COMPR:
1305  s->compr = value;
1306  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1307  s->predictor = 0;
1308  switch (s->compr) {
1309  case TIFF_RAW:
1310  case TIFF_PACKBITS:
1311  case TIFF_LZW:
1312  case TIFF_CCITT_RLE:
1313  break;
1314  case TIFF_G3:
1315  case TIFF_G4:
1316  s->fax_opts = 0;
1317  break;
1318  case TIFF_DEFLATE:
1319  case TIFF_ADOBE_DEFLATE:
1320 #if CONFIG_ZLIB
1321  break;
1322 #else
1323  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1324  return AVERROR(ENOSYS);
1325 #endif
1326  case TIFF_JPEG:
1327  case TIFF_NEWJPEG:
1328  s->is_jpeg = 1;
1329  break;
1330  case TIFF_LZMA:
1331 #if CONFIG_LZMA
1332  break;
1333 #else
1334  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1335  return AVERROR(ENOSYS);
1336 #endif
1337  default:
1338  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1339  s->compr);
1340  return AVERROR_INVALIDDATA;
1341  }
1342  break;
1343  case TIFF_ROWSPERSTRIP:
1344  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1345  value = s->height;
1346  s->rps = FFMIN(value, s->height);
1347  break;
1348  case TIFF_STRIP_OFFS:
1349  if (count == 1) {
1350  if (value > INT_MAX) {
1352  "strippos %u too large\n", value);
1353  return AVERROR_INVALIDDATA;
1354  }
1355  s->strippos = 0;
1356  s->stripoff = value;
1357  } else
1358  s->strippos = off;
1359  s->strips = count;
1360  if (s->strips == 1)
1361  s->rps = s->height;
1362  s->sot = type;
1363  break;
1364  case TIFF_STRIP_SIZE:
1365  if (count == 1) {
1366  if (value > INT_MAX) {
1368  "stripsize %u too large\n", value);
1369  return AVERROR_INVALIDDATA;
1370  }
1371  s->stripsizesoff = 0;
1372  s->stripsize = value;
1373  s->strips = 1;
1374  } else {
1375  s->stripsizesoff = off;
1376  }
1377  s->strips = count;
1378  s->sstype = type;
1379  break;
1380  case TIFF_XRES:
1381  case TIFF_YRES:
1382  set_sar(s, tag, value, value2);
1383  break;
1384  case TIFF_TILE_OFFSETS:
1385  s->tile_offsets_offset = off;
1386  s->tile_count = count;
1387  s->is_tiled = 1;
1388  break;
1389  case TIFF_TILE_BYTE_COUNTS:
1390  s->tile_byte_counts_offset = off;
1391  break;
1392  case TIFF_TILE_LENGTH:
1393  s->tile_length = value;
1394  break;
1395  case TIFF_TILE_WIDTH:
1396  s->tile_width = value;
1397  break;
1398  case TIFF_PREDICTOR:
1399  s->predictor = value;
1400  break;
1401  case TIFF_SUB_IFDS:
1402  if (count == 1)
1403  s->sub_ifd = value;
1404  else if (count > 1)
1405  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1406  break;
1408  for (int i = 0; i < FFMIN(count, 1 << s->bpp); i++)
1409  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1410  break;
1411  case DNG_BLACK_LEVEL:
1412  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1413  if (type == TIFF_RATIONAL) {
1414  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1415  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1416 
1417  s->black_level = value / value2;
1418  } else
1419  s->black_level = ff_tget(&s->gb, type, s->le);
1420  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1421  } else {
1422  s->black_level = value / value2;
1423  }
1424  break;
1425  case DNG_WHITE_LEVEL:
1426  s->white_level = value;
1427  break;
1428  case TIFF_CFA_PATTERN_DIM:
1429  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1430  ff_tget(&s->gb, type, s->le) != 2)) {
1431  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  break;
1435  case TIFF_CFA_PATTERN:
1436  s->is_bayer = 1;
1437  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1438  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1439  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1440  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1441  break;
1442  case TIFF_PHOTOMETRIC:
1443  switch (value) {
1446  case TIFF_PHOTOMETRIC_RGB:
1450  case TIFF_PHOTOMETRIC_CFA:
1451  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1452  s->photometric = value;
1453  break;
1461  "PhotometricInterpretation 0x%04X",
1462  value);
1463  return AVERROR_PATCHWELCOME;
1464  default:
1465  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1466  "unknown\n", value);
1467  return AVERROR_INVALIDDATA;
1468  }
1469  break;
1470  case TIFF_FILL_ORDER:
1471  if (value < 1 || value > 2) {
1473  "Unknown FillOrder value %d, trying default one\n", value);
1474  value = 1;
1475  }
1476  s->fill_order = value - 1;
1477  break;
1478  case TIFF_PAL: {
1479  GetByteContext pal_gb[3];
1480  off = type_sizes[type];
1481  if (count / 3 > 256 ||
1482  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1483  return AVERROR_INVALIDDATA;
1484 
1485  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1486  bytestream2_skip(&pal_gb[1], count / 3 * off);
1487  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1488 
1489  off = (type_sizes[type] - 1) << 3;
1490  if (off > 31U) {
1491  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1492  return AVERROR_INVALIDDATA;
1493  }
1494 
1495  for (i = 0; i < count / 3; i++) {
1496  uint32_t p = 0xFF000000;
1497  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1498  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1499  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1500  s->palette[i] = p;
1501  }
1502  s->palette_is_set = 1;
1503  break;
1504  }
1505  case TIFF_PLANAR:
1506  s->planar = value == 2;
1507  break;
1509  if (count != 2) {
1510  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1511  return AVERROR_INVALIDDATA;
1512  }
1513  for (i = 0; i < count; i++) {
1514  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1515  if (s->subsampling[i] <= 0) {
1516  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1517  s->subsampling[i] = 1;
1518  return AVERROR_INVALIDDATA;
1519  }
1520  }
1521  break;
1522  case TIFF_T4OPTIONS:
1523  if (s->compr == TIFF_G3)
1524  s->fax_opts = value;
1525  break;
1526  case TIFF_T6OPTIONS:
1527  if (s->compr == TIFF_G4)
1528  s->fax_opts = value;
1529  break;
1530 #define ADD_METADATA(count, name, sep)\
1531  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1532  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1533  goto end;\
1534  }
1536  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1537  break;
1539  ADD_METADATA(count, "ModelTransformationTag", NULL);
1540  break;
1541  case TIFF_MODEL_TIEPOINT:
1542  ADD_METADATA(count, "ModelTiepointTag", NULL);
1543  break;
1545  if (s->geotag_count) {
1546  avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
1547  return AVERROR_INVALIDDATA;
1548  }
1549  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1550  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1551  s->geotag_count = ff_tget_short(&s->gb, s->le);
1552  if (s->geotag_count > count / 4 - 1) {
1553  s->geotag_count = count / 4 - 1;
1554  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1555  }
1556  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1557  || s->geotag_count == 0) {
1558  s->geotag_count = 0;
1559  return -1;
1560  }
1561  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1562  if (!s->geotags) {
1563  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1564  s->geotag_count = 0;
1565  goto end;
1566  }
1567  for (i = 0; i < s->geotag_count; i++) {
1568  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1569  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1570  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1571 
1572  if (!s->geotags[i].type)
1573  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1574  else
1575  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1576  }
1577  break;
1579  if (count >= INT_MAX / sizeof(int64_t))
1580  return AVERROR_INVALIDDATA;
1581  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1582  return AVERROR_INVALIDDATA;
1583  dp = av_malloc_array(count, sizeof(double));
1584  if (!dp) {
1585  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1586  goto end;
1587  }
1588  for (i = 0; i < count; i++)
1589  dp[i] = ff_tget_double(&s->gb, s->le);
1590  for (i = 0; i < s->geotag_count; i++) {
1591  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1592  if (s->geotags[i].count == 0
1593  || s->geotags[i].offset + s->geotags[i].count > count) {
1594  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1595  } else if (s->geotags[i].val) {
1596  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1597  } else {
1598  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1599  if (!ap) {
1600  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1601  av_freep(&dp);
1602  return AVERROR(ENOMEM);
1603  }
1604  s->geotags[i].val = ap;
1605  }
1606  }
1607  }
1608  av_freep(&dp);
1609  break;
1610  case TIFF_GEO_ASCII_PARAMS:
1611  pos = bytestream2_tell(&s->gb);
1612  for (i = 0; i < s->geotag_count; i++) {
1613  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1614  if (s->geotags[i].count == 0
1615  || s->geotags[i].offset + s->geotags[i].count > count) {
1616  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1617  } else {
1618  char *ap;
1619 
1620  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1621  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1622  return AVERROR_INVALIDDATA;
1623  if (s->geotags[i].val)
1624  return AVERROR_INVALIDDATA;
1625  ap = av_malloc(s->geotags[i].count);
1626  if (!ap) {
1627  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1628  return AVERROR(ENOMEM);
1629  }
1630  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1631  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1632  s->geotags[i].val = ap;
1633  }
1634  }
1635  }
1636  break;
1637  case TIFF_ARTIST:
1638  ADD_METADATA(count, "artist", NULL);
1639  break;
1640  case TIFF_COPYRIGHT:
1641  ADD_METADATA(count, "copyright", NULL);
1642  break;
1643  case TIFF_DATE:
1644  ADD_METADATA(count, "date", NULL);
1645  break;
1646  case TIFF_DOCUMENT_NAME:
1647  ADD_METADATA(count, "document_name", NULL);
1648  break;
1649  case TIFF_HOST_COMPUTER:
1650  ADD_METADATA(count, "computer", NULL);
1651  break;
1653  ADD_METADATA(count, "description", NULL);
1654  break;
1655  case TIFF_MAKE:
1656  ADD_METADATA(count, "make", NULL);
1657  break;
1658  case TIFF_MODEL:
1659  ADD_METADATA(count, "model", NULL);
1660  break;
1661  case TIFF_PAGE_NAME:
1662  ADD_METADATA(count, "page_name", NULL);
1663  break;
1664  case TIFF_PAGE_NUMBER:
1665  ADD_METADATA(count, "page_number", " / ");
1666  // need to seek back to re-read the page number
1667  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1668  // read the page number
1669  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1670  // get back to where we were before the previous seek
1671  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1672  break;
1673  case TIFF_SOFTWARE_NAME:
1674  ADD_METADATA(count, "software", NULL);
1675  break;
1676  case DNG_VERSION:
1677  if (count == 4) {
1678  unsigned int ver[4];
1679  ver[0] = ff_tget(&s->gb, type, s->le);
1680  ver[1] = ff_tget(&s->gb, type, s->le);
1681  ver[2] = ff_tget(&s->gb, type, s->le);
1682  ver[3] = ff_tget(&s->gb, type, s->le);
1683 
1684  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1685  ver[0], ver[1], ver[2], ver[3]);
1686 
1688  }
1689  break;
1690  case CINEMADNG_TIME_CODES:
1691  case CINEMADNG_FRAME_RATE:
1692  case CINEMADNG_T_STOP:
1693  case CINEMADNG_REEL_NAME:
1696  break;
1697  default:
1698  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1700  "Unknown or unsupported tag %d/0x%0X\n",
1701  tag, tag);
1702  return AVERROR_INVALIDDATA;
1703  }
1704  }
1705 end:
1706  if (s->bpp > 64U) {
1708  "This format is not supported (bpp=%d, %d components)\n",
1709  s->bpp, count);
1710  s->bpp = 0;
1711  return AVERROR_INVALIDDATA;
1712  }
1713  bytestream2_seek(&s->gb, start, SEEK_SET);
1714  return 0;
1715 }
1716 
1717 static int decode_frame(AVCodecContext *avctx,
1718  void *data, int *got_frame, AVPacket *avpkt)
1719 {
1720  TiffContext *const s = avctx->priv_data;
1721  AVFrame *const p = data;
1722  ThreadFrame frame = { .f = data };
1723  unsigned off, last_off;
1724  int le, ret, plane, planes;
1725  int i, j, entries, stride;
1726  unsigned soff, ssize;
1727  uint8_t *dst;
1728  GetByteContext stripsizes;
1729  GetByteContext stripdata;
1730  int retry_for_subifd, retry_for_page;
1731  int is_dng;
1732 
1733  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1734 
1735  // parse image header
1736  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1737  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1738  return ret;
1739  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1740  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1741  return AVERROR_INVALIDDATA;
1742  }
1743  s->le = le;
1744  // TIFF_BPP is not a required tag and defaults to 1
1745 
1747 again:
1748  s->is_thumbnail = 0;
1749  s->bppcount = s->bpp = 1;
1751  s->compr = TIFF_RAW;
1752  s->fill_order = 0;
1753  s->white_level = 0;
1754  s->is_bayer = 0;
1755  s->is_tiled = 0;
1756  s->is_jpeg = 0;
1757  s->cur_page = 0;
1758 
1759  for (i = 0; i < 65536; i++)
1760  s->dng_lut[i] = i;
1761 
1762  free_geotags(s);
1763 
1764  // Reset these offsets so we can tell if they were set this frame
1765  s->stripsizesoff = s->strippos = 0;
1766  /* parse image file directory */
1767  bytestream2_seek(&s->gb, off, SEEK_SET);
1768  entries = ff_tget_short(&s->gb, le);
1769  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1770  return AVERROR_INVALIDDATA;
1771  for (i = 0; i < entries; i++) {
1772  if ((ret = tiff_decode_tag(s, p)) < 0)
1773  return ret;
1774  }
1775 
1776  if (s->get_thumbnail && !s->is_thumbnail) {
1777  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1778  return AVERROR_EOF;
1779  }
1780 
1781  /** whether we should process this IFD's SubIFD */
1782  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1783  /** whether we should process this multi-page IFD's next page */
1784  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1785 
1786  last_off = off;
1787  if (retry_for_page) {
1788  // set offset to the next IFD
1789  off = ff_tget_long(&s->gb, le);
1790  } else if (retry_for_subifd) {
1791  // set offset to the SubIFD
1792  off = s->sub_ifd;
1793  }
1794 
1795  if (retry_for_subifd || retry_for_page) {
1796  if (!off) {
1797  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1798  return AVERROR_INVALIDDATA;
1799  }
1800  if (off <= last_off) {
1801  avpriv_request_sample(s->avctx, "non-increasing IFD offset\n");
1802  return AVERROR_INVALIDDATA;
1803  }
1804  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1805  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1806  return AVERROR_INVALIDDATA;
1807  }
1808  s->sub_ifd = 0;
1809  goto again;
1810  }
1811 
1812  /* At this point we've decided on which (Sub)IFD to process */
1813 
1814  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1815 
1816  for (i = 0; i<s->geotag_count; i++) {
1817  const char *keyname = get_geokey_name(s->geotags[i].key);
1818  if (!keyname) {
1819  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1820  continue;
1821  }
1822  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1823  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1824  continue;
1825  }
1826  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1827  if (ret<0) {
1828  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1829  return ret;
1830  }
1831  }
1832 
1833  if (is_dng) {
1834  if (s->white_level == 0)
1835  s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
1836 
1837  if (s->white_level <= s->black_level) {
1838  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRIu32") must be less than WhiteLevel (%"PRIu32")\n",
1839  s->black_level, s->white_level);
1840  return AVERROR_INVALIDDATA;
1841  }
1842  }
1843 
1844  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1845  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1846  return AVERROR_INVALIDDATA;
1847  }
1848 
1849  /* now we have the data and may start decoding */
1850  if ((ret = init_image(s, &frame)) < 0)
1851  return ret;
1852 
1853  if (!s->is_tiled) {
1854  if (s->strips == 1 && !s->stripsize) {
1855  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1856  s->stripsize = avpkt->size - s->stripoff;
1857  }
1858 
1859  if (s->stripsizesoff) {
1860  if (s->stripsizesoff >= (unsigned)avpkt->size)
1861  return AVERROR_INVALIDDATA;
1862  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1863  avpkt->size - s->stripsizesoff);
1864  }
1865  if (s->strippos) {
1866  if (s->strippos >= (unsigned)avpkt->size)
1867  return AVERROR_INVALIDDATA;
1868  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1869  avpkt->size - s->strippos);
1870  }
1871 
1872  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1873  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1874  return AVERROR_INVALIDDATA;
1875  }
1876  }
1877 
1881  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1883  }
1884 
1885  /* Handle DNG images with JPEG-compressed tiles */
1886 
1887  if (is_dng && s->is_tiled) {
1888  if (!s->is_jpeg) {
1889  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1890  return AVERROR_PATCHWELCOME;
1891  } else if (!s->is_bayer) {
1892  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1893  return AVERROR_PATCHWELCOME;
1894  } else {
1895  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1896  *got_frame = 1;
1897  return ret;
1898  }
1899  }
1900 
1901  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1902 
1903  planes = s->planar ? s->bppcount : 1;
1904  for (plane = 0; plane < planes; plane++) {
1905  uint8_t *five_planes = NULL;
1906  int remaining = avpkt->size;
1907  int decoded_height;
1908  stride = p->linesize[plane];
1909  dst = p->data[plane];
1911  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1912  stride = stride * 5 / 4;
1913  five_planes =
1914  dst = av_malloc(stride * s->height);
1915  if (!dst)
1916  return AVERROR(ENOMEM);
1917  }
1918  for (i = 0; i < s->height; i += s->rps) {
1919  if (i)
1920  dst += s->rps * stride;
1921  if (s->stripsizesoff)
1922  ssize = ff_tget(&stripsizes, s->sstype, le);
1923  else
1924  ssize = s->stripsize;
1925 
1926  if (s->strippos)
1927  soff = ff_tget(&stripdata, s->sot, le);
1928  else
1929  soff = s->stripoff;
1930 
1931  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
1932  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
1933  av_freep(&five_planes);
1934  return AVERROR_INVALIDDATA;
1935  }
1936  remaining -= ssize;
1937  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
1938  FFMIN(s->rps, s->height - i))) < 0) {
1939  if (avctx->err_recognition & AV_EF_EXPLODE) {
1940  av_freep(&five_planes);
1941  return ret;
1942  }
1943  break;
1944  }
1945  }
1946  decoded_height = FFMIN(i, s->height);
1947 
1948  if (s->predictor == 2) {
1949  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1950  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported\n");
1951  return AVERROR_PATCHWELCOME;
1952  }
1953  dst = five_planes ? five_planes : p->data[plane];
1954  soff = s->bpp >> 3;
1955  if (s->planar)
1956  soff = FFMAX(soff / s->bppcount, 1);
1957  ssize = s->width * soff;
1958  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
1961  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
1964  for (i = 0; i < decoded_height; i++) {
1965  for (j = soff; j < ssize; j += 2)
1966  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
1967  dst += stride;
1968  }
1969  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1972  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
1975  for (i = 0; i < decoded_height; i++) {
1976  for (j = soff; j < ssize; j += 2)
1977  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
1978  dst += stride;
1979  }
1980  } else {
1981  for (i = 0; i < decoded_height; i++) {
1982  for (j = soff; j < ssize; j++)
1983  dst[j] += dst[j - soff];
1984  dst += stride;
1985  }
1986  }
1987  }
1988 
1990  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
1991  dst = p->data[plane];
1992  for (i = 0; i < s->height; i++) {
1993  for (j = 0; j < stride; j++)
1994  dst[j] = c - dst[j];
1995  dst += stride;
1996  }
1997  }
1998 
1999  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2000  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2001  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2002  uint8_t *src = five_planes ? five_planes : p->data[plane];
2003  dst = p->data[plane];
2004  for (i = 0; i < s->height; i++) {
2005  for (j = 0; j < s->width; j++) {
2006  int k = 255 - src[x * j + 3];
2007  int r = (255 - src[x * j ]) * k;
2008  int g = (255 - src[x * j + 1]) * k;
2009  int b = (255 - src[x * j + 2]) * k;
2010  dst[4 * j ] = r * 257 >> 16;
2011  dst[4 * j + 1] = g * 257 >> 16;
2012  dst[4 * j + 2] = b * 257 >> 16;
2013  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2014  }
2015  src += stride;
2016  dst += p->linesize[plane];
2017  }
2018  av_freep(&five_planes);
2019  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2020  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2021  dst = p->data[plane];
2022  for (i = 0; i < s->height; i++) {
2023  for (j = 0; j < s->width; j++) {
2024  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2025  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2026  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2027  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2028  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2029  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2030  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2031  AV_WB16(dst + 8 * j + 6, 65535);
2032  }
2033  dst += p->linesize[plane];
2034  }
2035  }
2036  }
2037 
2038  if (s->planar && s->bppcount > 2) {
2039  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2040  FFSWAP(int, p->linesize[0], p->linesize[2]);
2041  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2042  FFSWAP(int, p->linesize[0], p->linesize[1]);
2043  }
2044 
2045  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2046  uint16_t *dst = (uint16_t *)p->data[0];
2047  for (i = 0; i < s->height; i++) {
2048  for (j = 0; j < s->width; j++)
2049  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2050  dst += stride / 2;
2051  }
2052  }
2053 
2054  *got_frame = 1;
2055 
2056  return avpkt->size;
2057 }
2058 
2059 static av_cold int tiff_init(AVCodecContext *avctx)
2060 {
2061  TiffContext *s = avctx->priv_data;
2062  const AVCodec *codec;
2063  int ret;
2064 
2065  s->width = 0;
2066  s->height = 0;
2067  s->subsampling[0] =
2068  s->subsampling[1] = 1;
2069  s->avctx = avctx;
2070  ff_lzw_decode_open(&s->lzw);
2071  if (!s->lzw)
2072  return AVERROR(ENOMEM);
2073  ff_ccitt_unpack_init();
2074 
2075  /* Allocate JPEG frame */
2076  s->jpgframe = av_frame_alloc();
2077  if (!s->jpgframe)
2078  return AVERROR(ENOMEM);
2079 
2080 
2081  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2082  if (!codec)
2083  return AVERROR_BUG;
2084  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2085  if (!s->avctx_mjpeg)
2086  return AVERROR(ENOMEM);
2087  s->avctx_mjpeg->flags = avctx->flags;
2088  s->avctx_mjpeg->flags2 = avctx->flags2;
2089  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2090  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2091  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2092  if (ret < 0) {
2093  return ret;
2094  }
2095 
2096  return 0;
2097 }
2098 
2099 static av_cold int tiff_end(AVCodecContext *avctx)
2100 {
2101  TiffContext *const s = avctx->priv_data;
2102 
2103  free_geotags(s);
2104 
2105  ff_lzw_decode_close(&s->lzw);
2106  av_freep(&s->deinvert_buf);
2107  s->deinvert_buf_size = 0;
2108  av_freep(&s->yuv_line);
2109  s->yuv_line_size = 0;
2110  av_freep(&s->fax_buffer);
2111  s->fax_buffer_size = 0;
2112  av_frame_free(&s->jpgframe);
2113  avcodec_free_context(&s->avctx_mjpeg);
2114  return 0;
2115 }
2116 
2117 #define OFFSET(x) offsetof(TiffContext, x)
2118 static const AVOption tiff_options[] = {
2119  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2120  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2121  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2122  { NULL },
2123 };
2124 
2125 static const AVClass tiff_decoder_class = {
2126  .class_name = "TIFF decoder",
2127  .item_name = av_default_item_name,
2128  .option = tiff_options,
2129  .version = LIBAVUTIL_VERSION_INT,
2130 };
2131 
2132 AVCodec ff_tiff_decoder = {
2133  .name = "tiff",
2134  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2135  .type = AVMEDIA_TYPE_VIDEO,
2136  .id = AV_CODEC_ID_TIFF,
2137  .priv_data_size = sizeof(TiffContext),
2138  .init = tiff_init,
2139  .close = tiff_end,
2140  .decode = decode_frame,
2141  .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
2142  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2143  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2144  .priv_class = &tiff_decoder_class,
2145 };