tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/error.h"
40 #include "libavutil/intreadwrite.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/reverse.h"
43 #include "avcodec.h"
44 #include "bytestream.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 #include "faxcompr.h"
48 #include "lzw.h"
49 #include "tiff.h"
50 #include "tiff_data.h"
51 #include "mjpegdec.h"
52 #include "thread.h"
53 #include "get_bits.h"
54 
55 typedef struct TiffContext {
56  AVClass *class;
59 
60  /* JPEG decoding for DNG */
61  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
62  AVPacket *jpkt; // encoded JPEG tile
63  AVFrame *jpgframe; // decoded JPEG tile
64 
66  uint16_t get_page;
68 
70  int width, height;
71  unsigned int bpp, bppcount;
72  uint32_t palette[256];
74  int le;
77  int planar;
78  int subsampling[2];
79  int fax_opts;
80  int predictor;
82  uint32_t res[4];
84  unsigned last_tag;
85 
86  int is_bayer;
88  uint8_t pattern[4];
89 
90  float analog_balance[4];
91  float as_shot_neutral[4];
92  float as_shot_white[4];
93  float color_matrix[3][4];
94  float camera_calibration[4][4];
95  float premultiply[4];
96  float black_level[4];
97 
98  unsigned white_level;
99  uint16_t dng_lut[65536];
100 
101  uint32_t sub_ifd;
102  uint16_t cur_page;
103 
105  int sot;
108 
109  /* Tile support */
110  int is_tiled;
113 
114  int is_jpeg;
115 
116  uint8_t *deinvert_buf;
118  uint8_t *yuv_line;
119  unsigned int yuv_line_size;
120 
123 } TiffContext;
124 
125 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
126 
127 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
128  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
129  s->tiff_type = tiff_type;
130 }
131 
132 static void free_geotags(TiffContext *const s)
133 {
134  int i;
135  for (i = 0; i < s->geotag_count; i++) {
136  if (s->geotags[i].val)
137  av_freep(&s->geotags[i].val);
138  }
139  av_freep(&s->geotags);
140  s->geotag_count = 0;
141 }
142 
143 #define RET_GEOKEY(TYPE, array, element)\
144  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
145  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
146  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
147 
148 static const char *get_geokey_name(int key)
149 {
150  RET_GEOKEY(VERT, vert, name);
151  RET_GEOKEY(PROJ, proj, name);
152  RET_GEOKEY(GEOG, geog, name);
153  RET_GEOKEY(CONF, conf, name);
154 
155  return NULL;
156 }
157 
158 static int get_geokey_type(int key)
159 {
160  RET_GEOKEY(VERT, vert, type);
161  RET_GEOKEY(PROJ, proj, type);
162  RET_GEOKEY(GEOG, geog, type);
163  RET_GEOKEY(CONF, conf, type);
164 
165  return AVERROR_INVALIDDATA;
166 }
167 
168 static int cmp_id_key(const void *id, const void *k)
169 {
170  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
171 }
172 
173 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
174 {
175  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
176  if(r)
177  return r->name;
178 
179  return NULL;
180 }
181 
182 static char *get_geokey_val(int key, int val)
183 {
184  char *ap;
185 
186  if (val == TIFF_GEO_KEY_UNDEFINED)
187  return av_strdup("undefined");
188  if (val == TIFF_GEO_KEY_USER_DEFINED)
189  return av_strdup("User-Defined");
190 
191 #define RET_GEOKEY_VAL(TYPE, array)\
192  if (val >= TIFF_##TYPE##_OFFSET &&\
193  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
194  return av_strdup(tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
195 
196  switch (key) {
198  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
199  break;
201  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
202  break;
206  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
207  break;
210  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
211  break;
213  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
214  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
215  break;
217  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
218  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
219  break;
221  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
222  break;
224  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
225  break;
228  if(ap) return ap;
229  break;
232  if(ap) return ap;
233  break;
235  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
236  break;
238  RET_GEOKEY_VAL(VERT_CS, vert_cs);
239  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
240  break;
241 
242  }
243 
244  ap = av_malloc(14);
245  if (ap)
246  snprintf(ap, 14, "Unknown-%d", val);
247  return ap;
248 }
249 
250 static char *doubles2str(double *dp, int count, const char *sep)
251 {
252  int i;
253  char *ap, *ap0;
254  uint64_t component_len;
255  if (!sep) sep = ", ";
256  component_len = 24LL + strlen(sep);
257  if (count >= (INT_MAX - 1)/component_len)
258  return NULL;
259  ap = av_malloc(component_len * count + 1);
260  if (!ap)
261  return NULL;
262  ap0 = ap;
263  ap[0] = '\0';
264  for (i = 0; i < count; i++) {
265  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
266  if(l >= component_len) {
267  av_free(ap0);
268  return NULL;
269  }
270  ap += l;
271  }
272  ap0[strlen(ap0) - strlen(sep)] = '\0';
273  return ap0;
274 }
275 
276 static int add_metadata(int count, int type,
277  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
278 {
279  switch(type) {
280  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
281  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
282  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
283  default : return AVERROR_INVALIDDATA;
284  };
285 }
286 
287 /**
288  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
289  */
290 static uint16_t av_always_inline dng_process_color16(uint16_t value,
291  const uint16_t *lut,
292  float black_level,
293  float scale_factor)
294 {
295  float value_norm;
296 
297  // Map the raw value through the linearization lookup table
298  value = lut[value];
299 
300  // Black level subtraction
301  // Color scaling
302  value_norm = ((float)value - black_level) * scale_factor;
303 
304  value = av_clip_uint16(lrintf(value_norm));
305 
306  return value;
307 }
308 
309 static uint16_t av_always_inline dng_process_color8(uint16_t value,
310  const uint16_t *lut,
311  float black_level,
312  float scale_factor)
313 {
314  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
315 }
316 
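/**
 * Copy a rectangle of raw DNG samples from src to dst, applying the
 * linearization LUT, black-level subtraction and white-level scaling of
 * dng_process_color16()/dng_process_color8(). When is_single_comp is set,
 * each source row packs two output rows side by side (the layout produced
 * by single-component JPEG-encoded DNGs).
 */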
317 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
318  const uint8_t *src, int src_stride, int width, int height,
319  int is_single_comp, int is_u16, int odd_line)
320 {
321  float scale_factor[4];
322  int line, col;
323 
324  if (s->is_bayer) {
325  for (int i = 0; i < 4; i++)
326  scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
327  } else {
328  for (int i = 0; i < 4; i++)
329  scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
330  }
331 
332  if (is_single_comp) {
333  if (!is_u16)
334  return; /* <= 8bpp unsupported */
335 
336  /* The image is double the width and half the height we need; each input row comprises
337  two rows of the output (split vertically in the middle). */
338  for (line = 0; line < height / 2; line++) {
339  uint16_t *dst_u16 = (uint16_t *)dst;
340  const uint16_t *src_u16 = (const uint16_t *)src;
341 
342  /* Blit first half of input row to initial row of output */
343  for (col = 0; col < width; col++)
344  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
345 
346  /* Advance the destination pointer by a row (source pointer remains in the same place) */
347  dst += dst_stride * sizeof(uint16_t);
348  dst_u16 = (uint16_t *)dst;
349 
350  /* Blit second half of input row to next row of output */
351  for (col = 0; col < width; col++)
352  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
353 
354  dst += dst_stride * sizeof(uint16_t);
355  src += src_stride * sizeof(uint16_t);
356  }
357  } else {
358  /* Input and output image are the same size and the MJpeg decoder has done per-component
359  deinterleaving, so blitting here is straightforward. */
360  if (is_u16) {
361  for (line = 0; line < height; line++) {
362  uint16_t *dst_u16 = (uint16_t *)dst;
363  const uint16_t *src_u16 = (const uint16_t *)src;
364 
365  for (col = 0; col < width; col++)
366  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
367  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
368  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
369 
370  dst += dst_stride * sizeof(uint16_t);
371  src += src_stride * sizeof(uint16_t);
372  }
373  } else {
374  for (line = 0; line < height; line++) {
375  uint8_t *dst_u8 = dst;
376  const uint8_t *src_u8 = src;
377 
378  for (col = 0; col < width; col++)
379  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
380  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
381  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
382 
383  dst += dst_stride;
384  src += src_stride;
385  }
386  }
387  }
388 }
389 
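/**
 * Expand one row of packed samples: 1/2/4 bpp inputs are unpacked to one byte
 * per pixel (for PAL8 output), 10/12/14 bpp inputs are unpacked to 16-bit
 * words, and other depths are copied from src (usePtr) or filled with the
 * constant c.
 */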
390 static void av_always_inline horizontal_fill(TiffContext *s,
391  unsigned int bpp, uint8_t* dst,
392  int usePtr, const uint8_t *src,
393  uint8_t c, int width, int offset)
394 {
395  switch (bpp) {
396  case 1:
397  while (--width >= 0) {
398  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
399  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
400  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
401  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
402  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
403  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
404  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
405  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
406  }
407  break;
408  case 2:
409  while (--width >= 0) {
410  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
411  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
412  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
413  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
414  }
415  break;
416  case 4:
417  while (--width >= 0) {
418  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
419  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
420  }
421  break;
422  case 10:
423  case 12:
424  case 14: {
425  uint16_t *dst16 = (uint16_t *)dst;
426  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
427  uint8_t shift = is_dng ? 0 : 16 - bpp;
428  GetBitContext gb;
429 
430  init_get_bits8(&gb, src, width);
431  for (int i = 0; i < s->width; i++) {
432  dst16[i] = get_bits(&gb, bpp) << shift;
433  }
434  }
435  break;
436  default:
437  if (usePtr) {
438  memcpy(dst + offset, src, width);
439  } else {
440  memset(dst + offset, c, width);
441  }
442  }
443 }
444 
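/* Bit-reverse every byte of src into s->deinvert_buf; used when the TIFF
 * FillOrder tag requests reversed bit order within each byte. */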
445 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
446 {
447  int i;
448 
449  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
450  if (!s->deinvert_buf)
451  return AVERROR(ENOMEM);
452  for (i = 0; i < size; i++)
453  s->deinvert_buf[i] = ff_reverse[src[i]];
454 
455  return 0;
456 }
457 
458 static void unpack_gray(TiffContext *s, AVFrame *p,
459  const uint8_t *src, int lnum, int width, int bpp)
460 {
461  GetBitContext gb;
462  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
463 
464  init_get_bits8(&gb, src, width);
465 
466  for (int i = 0; i < s->width; i++) {
467  dst[i] = get_bits(&gb, bpp);
468  }
469 }
470 
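/**
 * Scatter one packed YCbCr "macro-row" (all luma samples of a subsampling
 * block followed by one Cb and one Cr sample) into the planar destination
 * frame; the first branch clamps coordinates for images whose dimensions are
 * not multiples of the subsampling factors.
 */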
471 static void unpack_yuv(TiffContext *s, AVFrame *p,
472  const uint8_t *src, int lnum)
473 {
474  int i, j, k;
475  int w = (s->width - 1) / s->subsampling[0] + 1;
476  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
477  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
478  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
479  for (i = 0; i < w; i++) {
480  for (j = 0; j < s->subsampling[1]; j++)
481  for (k = 0; k < s->subsampling[0]; k++)
482  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
483  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
484  *pu++ = *src++;
485  *pv++ = *src++;
486  }
487  }else{
488  for (i = 0; i < w; i++) {
489  for (j = 0; j < s->subsampling[1]; j++)
490  for (k = 0; k < s->subsampling[0]; k++)
491  p->data[0][(lnum + j) * p->linesize[0] +
492  i * s->subsampling[0] + k] = *src++;
493  *pu++ = *src++;
494  *pv++ = *src++;
495  }
496  }
497 }
498 
499 #if CONFIG_ZLIB
500 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
501  int size)
502 {
503  z_stream zstream = { 0 };
504  int zret;
505 
506  zstream.next_in = src;
507  zstream.avail_in = size;
508  zstream.next_out = dst;
509  zstream.avail_out = *len;
510  zret = inflateInit(&zstream);
511  if (zret != Z_OK) {
512  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
513  return zret;
514  }
515  zret = inflate(&zstream, Z_SYNC_FLUSH);
516  inflateEnd(&zstream);
517  *len = zstream.total_out;
518  return zret == Z_STREAM_END ? Z_OK : zret;
519 }
520 
521 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
522  const uint8_t *src, int size, int width, int lines,
523  int strip_start, int is_yuv)
524 {
525  uint8_t *zbuf;
526  unsigned long outlen;
527  int ret, line;
528  outlen = width * lines;
529  zbuf = av_malloc(outlen);
530  if (!zbuf)
531  return AVERROR(ENOMEM);
532  if (s->fill_order) {
533  if ((ret = deinvert_buffer(s, src, size)) < 0) {
534  av_free(zbuf);
535  return ret;
536  }
537  src = s->deinvert_buf;
538  }
539  ret = tiff_uncompress(zbuf, &outlen, src, size);
540  if (ret != Z_OK) {
541  av_log(s->avctx, AV_LOG_ERROR,
542  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
543  (unsigned long)width * lines, ret);
544  av_free(zbuf);
545  return AVERROR_UNKNOWN;
546  }
547  src = zbuf;
548  for (line = 0; line < lines; line++) {
549  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
550  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
551  } else {
552  memcpy(dst, src, width);
553  }
554  if (is_yuv) {
555  unpack_yuv(s, p, dst, strip_start + line);
556  line += s->subsampling[1] - 1;
557  }
558  dst += stride;
559  src += width;
560  }
561  av_free(zbuf);
562  return 0;
563 }
564 #endif
565 
566 #if CONFIG_LZMA
567 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
568  int size)
569 {
570  lzma_stream stream = LZMA_STREAM_INIT;
571  lzma_ret ret;
572 
573  stream.next_in = src;
574  stream.avail_in = size;
575  stream.next_out = dst;
576  stream.avail_out = *len;
577  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
578  if (ret != LZMA_OK) {
579  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
580  return ret;
581  }
582  ret = lzma_code(&stream, LZMA_RUN);
583  lzma_end(&stream);
584  *len = stream.total_out;
585  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
586 }
587 
588 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
589  const uint8_t *src, int size, int width, int lines,
590  int strip_start, int is_yuv)
591 {
592  uint64_t outlen = width * (uint64_t)lines;
593  int ret, line;
594  uint8_t *buf = av_malloc(outlen);
595  if (!buf)
596  return AVERROR(ENOMEM);
597  if (s->fill_order) {
598  if ((ret = deinvert_buffer(s, src, size)) < 0) {
599  av_free(buf);
600  return ret;
601  }
602  src = s->deinvert_buf;
603  }
604  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
605  if (ret != LZMA_OK) {
606  av_log(s->avctx, AV_LOG_ERROR,
607  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
608  (uint64_t)width * lines, ret);
609  av_free(buf);
610  return AVERROR_UNKNOWN;
611  }
612  src = buf;
613  for (line = 0; line < lines; line++) {
614  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
615  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
616  } else {
617  memcpy(dst, src, width);
618  }
619  if (is_yuv) {
620  unpack_yuv(s, p, dst, strip_start + line);
621  line += s->subsampling[1] - 1;
622  }
623  dst += stride;
624  src += width;
625  }
626  av_free(buf);
627  return 0;
628 }
629 #endif
630 
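/* Decode a CCITT RLE/G3/G4 compressed strip via ff_ccitt_unpack(), bit-reversing
 * the input first if required by the fill order, then expand sub-8bpp palette data. */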
631 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
632  const uint8_t *src, int size, int width, int lines)
633 {
634  int line;
635  int ret;
636 
637  if (s->fill_order) {
638  if ((ret = deinvert_buffer(s, src, size)) < 0)
639  return ret;
640  src = s->deinvert_buf;
641  }
642  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
643  s->compr, s->fax_opts);
644  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
645  for (line = 0; line < lines; line++) {
646  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
647  dst += stride;
648  }
649  return ret;
650 }
651 
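/**
 * Decode a JPEG-compressed DNG tile (or strip) with the wrapped MJPEG decoder
 * and blit the result into the output frame at (dst_x, dst_y), applying DNG
 * color processing via dng_blit().
 */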
652 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
653  int tile_byte_count, int dst_x, int dst_y, int w, int h)
654 {
655  TiffContext *s = avctx->priv_data;
656  uint8_t *dst_data, *src_data;
657  uint32_t dst_offset; /* offset from dst buffer in pixels */
658  int is_single_comp, is_u16, pixel_size;
659  int ret;
660 
661  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
662  return AVERROR_INVALIDDATA;
663 
664  /* Prepare a packet and send to the MJPEG decoder */
665  av_packet_unref(s->jpkt);
666  s->jpkt->data = (uint8_t*)s->gb.buffer;
667  s->jpkt->size = tile_byte_count;
668 
669  if (s->is_bayer) {
670  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
671  /* We have to set this information here; there is no way to know whether a given JPEG is a
672  DNG-embedded image or not from its own data (and we need that information when decoding it). */
673  mjpegdecctx->bayer = 1;
674  }
675 
676  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
677  if (ret < 0) {
678  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
679  return ret;
680  }
681 
682  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
683  if (ret < 0) {
684  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
685 
686  /* Normally skip the broken tile; treat it as an error only with AV_EF_EXPLODE */
687  if (avctx->err_recognition & AV_EF_EXPLODE)
688  return AVERROR_INVALIDDATA;
689  else
690  return 0;
691  }
692 
693  is_u16 = (s->bpp > 8);
694 
695  /* Copy the decoded tile's pixels from 'jpgframe' to 'frame' (the final buffer) */
696 
697  if (s->jpgframe->width != s->avctx_mjpeg->width ||
698  s->jpgframe->height != s->avctx_mjpeg->height ||
699  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
700  return AVERROR_INVALIDDATA;
701 
702  /* See dng_blit for explanation */
703  if (s->avctx_mjpeg->width == w * 2 &&
704  s->avctx_mjpeg->height == h / 2 &&
705  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
706  is_single_comp = 1;
707  } else if (s->avctx_mjpeg->width >= w &&
708  s->avctx_mjpeg->height >= h &&
709  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
710  ) {
711  is_single_comp = 0;
712  } else
713  return AVERROR_INVALIDDATA;
714 
715  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
716 
717  if (is_single_comp && !is_u16) {
718  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
719  av_frame_unref(s->jpgframe);
720  return AVERROR_PATCHWELCOME;
721  }
722 
723  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
724  dst_data = frame->data[0] + dst_offset * pixel_size;
725  src_data = s->jpgframe->data[0];
726 
727  dng_blit(s,
728  dst_data,
729  frame->linesize[0] / pixel_size,
730  src_data,
731  s->jpgframe->linesize[0] / pixel_size,
732  w,
733  h,
734  is_single_comp,
735  is_u16, 0);
736 
737  av_frame_unref(s->jpgframe);
738 
739  return 0;
740 }
741 
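/**
 * Decode one strip of image data into dst, dispatching on the compression
 * scheme (raw, PackBits, LZW, CCITT fax, Deflate, LZMA or DNG JPEG) and
 * re-packing YUV or 12-bit grayscale lines as needed.
 */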
742 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
743  const uint8_t *src, int size, int strip_start, int lines)
744 {
745  PutByteContext pb;
746  int c, line, pixels, code, ret;
747  const uint8_t *ssrc = src;
748  int width = ((s->width * s->bpp) + 7) >> 3;
749  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
750  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
751  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
752  desc->nb_components >= 3;
753  int is_dng;
754 
755  if (s->planar)
756  width /= s->bppcount;
757 
758  if (size <= 0)
759  return AVERROR_INVALIDDATA;
760 
761  if (is_yuv) {
762  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
763  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
764  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
765  if (s->yuv_line == NULL) {
766  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
767  return AVERROR(ENOMEM);
768  }
769  dst = s->yuv_line;
770  stride = 0;
771 
772  width = (s->width - 1) / s->subsampling[0] + 1;
773  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
774  av_assert0(width <= bytes_per_row);
775  av_assert0(s->bpp == 24);
776  }
777  if (s->is_bayer) {
778  av_assert0(width == (s->bpp * s->width + 7) >> 3);
779  }
780  av_assert0(!(s->is_bayer && is_yuv));
781  if (p->format == AV_PIX_FMT_GRAY12) {
782  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
783  if (s->yuv_line == NULL) {
784  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
785  return AVERROR(ENOMEM);
786  }
787  dst = s->yuv_line;
788  stride = 0;
789  }
790 
791  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
792 #if CONFIG_ZLIB
793  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
794  strip_start, is_yuv);
795 #else
796  av_log(s->avctx, AV_LOG_ERROR,
797  "zlib support not enabled, "
798  "deflate compression not supported\n");
799  return AVERROR(ENOSYS);
800 #endif
801  }
802  if (s->compr == TIFF_LZMA) {
803 #if CONFIG_LZMA
804  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
805  strip_start, is_yuv);
806 #else
807  av_log(s->avctx, AV_LOG_ERROR,
808  "LZMA support not enabled\n");
809  return AVERROR(ENOSYS);
810 #endif
811  }
812  if (s->compr == TIFF_LZW) {
813  if (s->fill_order) {
814  if ((ret = deinvert_buffer(s, src, size)) < 0)
815  return ret;
816  ssrc = src = s->deinvert_buf;
817  }
818  if (size > 1 && !src[0] && (src[1]&1)) {
819  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
820  }
821  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
822  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
823  return ret;
824  }
825  for (line = 0; line < lines; line++) {
826  pixels = ff_lzw_decode(s->lzw, dst, width);
827  if (pixels < width) {
828  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
829  pixels, width);
830  return AVERROR_INVALIDDATA;
831  }
832  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
833  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
834  if (is_yuv) {
835  unpack_yuv(s, p, dst, strip_start + line);
836  line += s->subsampling[1] - 1;
837  } else if (p->format == AV_PIX_FMT_GRAY12) {
838  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
839  }
840  dst += stride;
841  }
842  return 0;
843  }
844  if (s->compr == TIFF_CCITT_RLE ||
845  s->compr == TIFF_G3 ||
846  s->compr == TIFF_G4) {
847  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
848  return AVERROR_INVALIDDATA;
849 
850  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
851  }
852 
853  bytestream2_init(&s->gb, src, size);
854  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
855 
856  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
857 
858  /* Decode JPEG-encoded DNGs with strips */
859  if (s->compr == TIFF_NEWJPEG && is_dng) {
860  if (s->strips > 1) {
861  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
862  return AVERROR_PATCHWELCOME;
863  }
864  if (!s->is_bayer)
865  return AVERROR_PATCHWELCOME;
866  if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
867  return ret;
868  return 0;
869  }
870 
871  if (is_dng && stride == 0)
872  return AVERROR_INVALIDDATA;
873 
874  for (line = 0; line < lines; line++) {
875  if (src - ssrc > size) {
876  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
877  return AVERROR_INVALIDDATA;
878  }
879 
880  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
881  break;
882  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
883  switch (s->compr) {
884  case TIFF_RAW:
885  if (ssrc + size - src < width)
886  return AVERROR_INVALIDDATA;
887 
888  if (!s->fill_order) {
889  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
890  dst, 1, src, 0, width, 0);
891  } else {
892  int i;
893  for (i = 0; i < width; i++)
894  dst[i] = ff_reverse[src[i]];
895  }
896 
897  /* Color processing for DNG images with uncompressed strips (non-tiled) */
898  if (is_dng) {
899  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
900 
901  is_u16 = (s->bpp / s->bppcount > 8);
902  pixel_size_bits = (is_u16 ? 16 : 8);
903  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
904 
905  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
906  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
907  dng_blit(s,
908  dst,
909  0, // no stride, only 1 line
910  dst,
911  0, // no stride, only 1 line
912  elements,
913  1,
914  0, // single-component variation is only present in JPEG-encoded DNGs
915  is_u16,
916  (line + strip_start)&1);
917  }
918 
919  src += width;
920  break;
921  case TIFF_PACKBITS:
922  for (pixels = 0; pixels < width;) {
923  if (ssrc + size - src < 2) {
924  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
925  return AVERROR_INVALIDDATA;
926  }
927  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
928  if (code >= 0) {
929  code++;
930  if (pixels + code > width ||
931  ssrc + size - src < code) {
932  av_log(s->avctx, AV_LOG_ERROR,
933  "Copy went out of bounds\n");
934  return AVERROR_INVALIDDATA;
935  }
936  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
937  dst, 1, src, 0, code, pixels);
938  src += code;
939  pixels += code;
940  } else if (code != -128) { // -127..-1
941  code = (-code) + 1;
942  if (pixels + code > width) {
943  av_log(s->avctx, AV_LOG_ERROR,
944  "Run went out of bounds\n");
945  return AVERROR_INVALIDDATA;
946  }
947  c = *src++;
948  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
949  dst, 0, NULL, c, code, pixels);
950  pixels += code;
951  }
952  }
953  if (s->fill_order) {
954  int i;
955  for (i = 0; i < width; i++)
956  dst[i] = ff_reverse[dst[i]];
957  }
958  break;
959  }
960  if (is_yuv) {
961  unpack_yuv(s, p, dst, strip_start + line);
962  line += s->subsampling[1] - 1;
963  } else if (p->format == AV_PIX_FMT_GRAY12) {
964  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
965  }
966  dst += stride;
967  }
968  return 0;
969 }
970 
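/**
 * Decode a tiled DNG: for each tile, read its offset and byte count from the
 * arrays referenced by the TileOffsets/TileByteCounts tags, then decode the
 * JPEG tile into its position in the output frame.
 */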
971 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
972  const AVPacket *avpkt)
973 {
974  TiffContext *s = avctx->priv_data;
975  int tile_idx;
976  int tile_offset_offset, tile_offset;
977  int tile_byte_count_offset, tile_byte_count;
978  int tile_count_x, tile_count_y;
979  int tile_width, tile_length;
980  int has_width_leftover, has_height_leftover;
981  int tile_x = 0, tile_y = 0;
982  int pos_x = 0, pos_y = 0;
983  int ret;
984 
985  if (s->tile_width <= 0 || s->tile_length <= 0)
986  return AVERROR_INVALIDDATA;
987 
988  has_width_leftover = (s->width % s->tile_width != 0);
989  has_height_leftover = (s->height % s->tile_length != 0);
990 
991  /* Calculate tile counts (round up) */
992  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
993  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
994 
995  /* Iterate over the number of tiles */
996  for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
997  tile_x = tile_idx % tile_count_x;
998  tile_y = tile_idx / tile_count_x;
999 
1000  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
1001  tile_width = s->width % s->tile_width;
1002  else
1003  tile_width = s->tile_width;
1004 
1005  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1006  tile_length = s->height % s->tile_length;
1007  else
1008  tile_length = s->tile_length;
1009 
1010  /* Read tile offset */
1011  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1012  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1013  tile_offset = ff_tget_long(&s->gb, s->le);
1014 
1015  /* Read tile byte size */
1016  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1017  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1018  tile_byte_count = ff_tget_long(&s->gb, s->le);
1019 
1020  /* Seek to tile data */
1021  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1022 
1023  /* Decode JPEG tile and copy it in the reference frame */
1024  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1025 
1026  if (ret < 0)
1027  return ret;
1028 
1029  /* Advance current positions */
1030  pos_x += tile_width;
1031  if (tile_x == tile_count_x - 1) { // If on the right edge
1032  pos_x = 0;
1033  pos_y += tile_length;
1034  }
1035  }
1036 
1037  /* Frame is ready to be output */
1040 
1041  return avpkt->size;
1042 }
1043 
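/**
 * Choose the output pixel format and allocate the frame. The switch key below
 * is composed as is_bayer * 100000 + planar * 10000 + bpp * 10 + bppcount,
 * so e.g. 8 bpp with 1 component maps to case 81.
 */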
1044 static int init_image(TiffContext *s, AVFrame *frame)
1045 {
1046  int ret;
1047  int create_gray_palette = 0;
1048 
1049  // make sure there is no aliasing in the following switch
1050  if (s->bpp > 128 || s->bppcount >= 10) {
1051  av_log(s->avctx, AV_LOG_ERROR,
1052  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1053  s->bpp, s->bppcount);
1054  return AVERROR_INVALIDDATA;
1055  }
1056 
1057  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1058  case 11:
1059  if (!s->palette_is_set) {
1060  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1061  break;
1062  }
1063  case 21:
1064  case 41:
1065  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1066  if (!s->palette_is_set) {
1067  create_gray_palette = 1;
1068  }
1069  break;
1070  case 81:
1071  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1072  break;
1073  case 121:
1074  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1075  break;
1076  case 100081:
1077  switch (AV_RL32(s->pattern)) {
1078  case 0x02010100:
1079  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1080  break;
1081  case 0x00010102:
1082  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1083  break;
1084  case 0x01000201:
1085  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1086  break;
1087  case 0x01020001:
1088  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1089  break;
1090  default:
1091  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1092  AV_RL32(s->pattern));
1093  return AVERROR_PATCHWELCOME;
1094  }
1095  break;
1096  case 100101:
1097  case 100121:
1098  case 100141:
1099  case 100161:
1100  switch (AV_RL32(s->pattern)) {
1101  case 0x02010100:
1102  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1103  break;
1104  case 0x00010102:
1105  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1106  break;
1107  case 0x01000201:
1108  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1109  break;
1110  case 0x01020001:
1111  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1112  break;
1113  default:
1114  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1115  AV_RL32(s->pattern));
1116  return AVERROR_PATCHWELCOME;
1117  }
1118  break;
1119  case 243:
1120  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1121  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1122  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1123  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1124  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1125  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1126  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1127  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1128  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1129  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1130  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1131  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1132  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1133  } else {
1134  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1135  return AVERROR_PATCHWELCOME;
1136  }
1137  } else
1138  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1139  break;
1140  case 161:
1141  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1142  break;
1143  case 162:
1144  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1145  break;
1146  case 322:
1147  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1148  break;
1149  case 324:
1150  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1151  break;
1152  case 405:
1153  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1154  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1155  else {
1156  av_log(s->avctx, AV_LOG_ERROR,
1157  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1158  return AVERROR_PATCHWELCOME;
1159  }
1160  break;
1161  case 483:
1162  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1163  break;
1164  case 644:
1165  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1166  break;
1167  case 10243:
1168  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1169  break;
1170  case 10324:
1171  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1172  break;
1173  case 10483:
1174  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1175  break;
1176  case 10644:
1177  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1178  break;
1179  case 963:
1180  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1181  break;
1182  case 1284:
1183  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1184  break;
1185  case 10963:
1186  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1187  break;
1188  case 11284:
1189  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1190  break;
1191  default:
1192  av_log(s->avctx, AV_LOG_ERROR,
1193  "This format is not supported (bpp=%d, bppcount=%d)\n",
1194  s->bpp, s->bppcount);
1195  return AVERROR_INVALIDDATA;
1196  }
1197 
1198  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1199  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1200  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1201  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1202  desc->nb_components < 3) {
1203  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1204  return AVERROR_INVALIDDATA;
1205  }
1206  }
1207 
1208  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1209  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1210  if (ret < 0)
1211  return ret;
1212  }
1213 
1214  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1215  return 0;
1216 
1217  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1218  return ret;
1219  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1220  if (!create_gray_palette)
1221  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1222  else {
1223  /* make default grayscale pal */
1224  int i;
1225  uint32_t *pal = (uint32_t *)frame->data[1];
1226  for (i = 0; i < 1<<s->bpp; i++)
1227  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1228  }
1229  }
1230  return 1;
1231 }
1232 
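/* Store the X/Y resolution numerator/denominator and, once all four values are
 * known, derive the sample aspect ratio from the ratio of vertical to
 * horizontal resolution. */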
1233 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1234 {
1235  int offset = tag == TIFF_YRES ? 2 : 0;
1236  s->res[offset++] = num;
1237  s->res[offset] = den;
1238  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1239  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1240  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1241  if (num > INT64_MAX || den > INT64_MAX) {
1242  num = num >> 1;
1243  den = den >> 1;
1244  }
1245  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1246  num, den, INT32_MAX);
1247  if (!s->avctx->sample_aspect_ratio.den)
1248  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1249  }
1250 }
1251 
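/**
 * Parse a single IFD entry (tag, type, count, value) from the bytestream and
 * update the decoder state and/or frame metadata accordingly.
 */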
1252 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1253 {
1254  AVFrameSideData *sd;
1255  GetByteContext gb_temp;
1256  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1257  int i, start;
1258  int pos;
1259  int ret;
1260  double *dp;
1261 
1262  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1263  if (ret < 0) {
1264  goto end;
1265  }
1266  if (tag <= s->last_tag)
1267  return AVERROR_INVALIDDATA;
1268 
1269  // We do not track TIFF_STRIP_SIZE in last_tag, as it sometimes appears out of order relative to TIFF_STRIP_OFFS
1270  if (tag != TIFF_STRIP_SIZE)
1271  s->last_tag = tag;
1272 
1273  off = bytestream2_tell(&s->gb);
1274  if (count == 1) {
1275  switch (type) {
1276  case TIFF_BYTE:
1277  case TIFF_SHORT:
1278  case TIFF_LONG:
1279  value = ff_tget(&s->gb, type, s->le);
1280  break;
1281  case TIFF_RATIONAL:
1282  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1283  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1284  if (!value2) {
1285  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1286  value2 = 1;
1287  }
1288 
1289  break;
1290  case TIFF_STRING:
1291  if (count <= 4) {
1292  break;
1293  }
1294  default:
1295  value = UINT_MAX;
1296  }
1297  }
1298 
1299  switch (tag) {
1300  case TIFF_SUBFILE:
1301  s->is_thumbnail = (value != 0);
1302  break;
1303  case TIFF_WIDTH:
1304  s->width = value;
1305  break;
1306  case TIFF_HEIGHT:
1307  s->height = value;
1308  break;
1309  case TIFF_BPP:
1310  if (count > 5 || count <= 0) {
1311  av_log(s->avctx, AV_LOG_ERROR,
1312  "This format is not supported (bpp=%d, %d components)\n",
1313  value, count);
1314  return AVERROR_INVALIDDATA;
1315  }
1316  s->bppcount = count;
1317  if (count == 1)
1318  s->bpp = value;
1319  else {
1320  switch (type) {
1321  case TIFF_BYTE:
1322  case TIFF_SHORT:
1323  case TIFF_LONG:
1324  s->bpp = 0;
1325  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1326  return AVERROR_INVALIDDATA;
1327  for (i = 0; i < count; i++)
1328  s->bpp += ff_tget(&s->gb, type, s->le);
1329  break;
1330  default:
1331  s->bpp = -1;
1332  }
1333  }
1334  break;
1335  case TIFF_SAMPLES_PER_PIXEL:
1336  if (count != 1) {
1337  av_log(s->avctx, AV_LOG_ERROR,
1338  "Samples per pixel requires a single value, many provided\n");
1339  return AVERROR_INVALIDDATA;
1340  }
1341  if (value > 5 || value <= 0) {
1342  av_log(s->avctx, AV_LOG_ERROR,
1343  "Invalid samples per pixel %d\n", value);
1344  return AVERROR_INVALIDDATA;
1345  }
1346  if (s->bppcount == 1)
1347  s->bpp *= value;
1348  s->bppcount = value;
1349  break;
1350  case TIFF_COMPR:
1351  s->compr = value;
1352  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1353  s->predictor = 0;
1354  switch (s->compr) {
1355  case TIFF_RAW:
1356  case TIFF_PACKBITS:
1357  case TIFF_LZW:
1358  case TIFF_CCITT_RLE:
1359  break;
1360  case TIFF_G3:
1361  case TIFF_G4:
1362  s->fax_opts = 0;
1363  break;
1364  case TIFF_DEFLATE:
1365  case TIFF_ADOBE_DEFLATE:
1366 #if CONFIG_ZLIB
1367  break;
1368 #else
1369  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1370  return AVERROR(ENOSYS);
1371 #endif
1372  case TIFF_JPEG:
1373  case TIFF_NEWJPEG:
1374  s->is_jpeg = 1;
1375  break;
1376  case TIFF_LZMA:
1377 #if CONFIG_LZMA
1378  break;
1379 #else
1380  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1381  return AVERROR(ENOSYS);
1382 #endif
1383  default:
1384  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1385  s->compr);
1386  return AVERROR_INVALIDDATA;
1387  }
1388  break;
1389  case TIFF_ROWSPERSTRIP:
1390  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1391  value = s->height;
1392  s->rps = FFMIN(value, s->height);
1393  break;
1394  case TIFF_STRIP_OFFS:
1395  if (count == 1) {
1396  if (value > INT_MAX) {
1397  av_log(s->avctx, AV_LOG_ERROR,
1398  "strippos %u too large\n", value);
1399  return AVERROR_INVALIDDATA;
1400  }
1401  s->strippos = 0;
1402  s->stripoff = value;
1403  } else
1404  s->strippos = off;
1405  s->strips = count;
1406  if (s->strips == s->bppcount)
1407  s->rps = s->height;
1408  s->sot = type;
1409  break;
1410  case TIFF_STRIP_SIZE:
1411  if (count == 1) {
1412  if (value > INT_MAX) {
1413  av_log(s->avctx, AV_LOG_ERROR,
1414  "stripsize %u too large\n", value);
1415  return AVERROR_INVALIDDATA;
1416  }
1417  s->stripsizesoff = 0;
1418  s->stripsize = value;
1419  s->strips = 1;
1420  } else {
1421  s->stripsizesoff = off;
1422  }
1423  s->strips = count;
1424  s->sstype = type;
1425  break;
1426  case TIFF_XRES:
1427  case TIFF_YRES:
1428  set_sar(s, tag, value, value2);
1429  break;
1430  case TIFF_TILE_OFFSETS:
1431  s->tile_offsets_offset = off;
1432  s->is_tiled = 1;
1433  break;
1434  case TIFF_TILE_BYTE_COUNTS:
1435  s->tile_byte_counts_offset = off;
1436  break;
1437  case TIFF_TILE_LENGTH:
1438  s->tile_length = value;
1439  break;
1440  case TIFF_TILE_WIDTH:
1441  s->tile_width = value;
1442  break;
1443  case TIFF_PREDICTOR:
1444  s->predictor = value;
1445  break;
1446  case TIFF_SUB_IFDS:
1447  if (count == 1)
1448  s->sub_ifd = value;
1449  else if (count > 1)
1450  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1451  break;
1452  case TIFF_GRAY_RESPONSE_CURVE:
1453  case DNG_LINEARIZATION_TABLE:
1454  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1455  return AVERROR_INVALIDDATA;
1456  for (int i = 0; i < count; i++)
1457  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1458  s->white_level = s->dng_lut[count-1];
1459  break;
1460  case DNG_BLACK_LEVEL:
1461  if (count > FF_ARRAY_ELEMS(s->black_level))
1462  return AVERROR_INVALIDDATA;
1463  s->black_level[0] = value / (float)value2;
1464  for (int i = 0; i < count && count > 1; i++) {
1465  if (type == TIFF_RATIONAL) {
1466  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1467  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1468  if (!value2) {
1469  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1470  value2 = 1;
1471  }
1472 
1473  s->black_level[i] = value / (float)value2;
1474  } else if (type == TIFF_SRATIONAL) {
1475  int value = ff_tget(&s->gb, TIFF_LONG, s->le);
1476  int value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1477  if (!value2) {
1478  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1479  value2 = 1;
1480  }
1481 
1482  s->black_level[i] = value / (float)value2;
1483  } else {
1484  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1485  }
1486  }
1487  for (int i = count; i < 4 && count > 0; i++)
1488  s->black_level[i] = s->black_level[count - 1];
1489  break;
1490  case DNG_WHITE_LEVEL:
1491  s->white_level = value;
1492  break;
1493  case TIFF_CFA_PATTERN_DIM:
1494  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1495  ff_tget(&s->gb, type, s->le) != 2)) {
1496  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1497  return AVERROR_INVALIDDATA;
1498  }
1499  break;
1500  case TIFF_CFA_PATTERN:
1501  s->is_bayer = 1;
1502  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1503  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1504  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1505  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1506  break;
1507  case TIFF_PHOTOMETRIC:
1508  switch (value) {
1509  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1510  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1511  case TIFF_PHOTOMETRIC_RGB:
1512  case TIFF_PHOTOMETRIC_PALETTE:
1513  case TIFF_PHOTOMETRIC_SEPARATED:
1514  case TIFF_PHOTOMETRIC_YCBCR:
1515  case TIFF_PHOTOMETRIC_CFA:
1516  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1517  s->photometric = value;
1518  break;
1526  "PhotometricInterpretation 0x%04X",
1527  value);
1528  return AVERROR_PATCHWELCOME;
1529  default:
1530  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1531  "unknown\n", value);
1532  return AVERROR_INVALIDDATA;
1533  }
1534  break;
1535  case TIFF_FILL_ORDER:
1536  if (value < 1 || value > 2) {
1537  av_log(s->avctx, AV_LOG_ERROR,
1538  "Unknown FillOrder value %d, trying default one\n", value);
1539  value = 1;
1540  }
1541  s->fill_order = value - 1;
1542  break;
1543  case TIFF_PAL: {
1544  GetByteContext pal_gb[3];
1545  off = type_sizes[type];
1546  if (count / 3 > 256 ||
1547  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1548  return AVERROR_INVALIDDATA;
1549 
1550  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1551  bytestream2_skip(&pal_gb[1], count / 3 * off);
1552  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1553 
1554  off = (type_sizes[type] - 1) << 3;
1555  if (off > 31U) {
1556  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1557  return AVERROR_INVALIDDATA;
1558  }
1559 
1560  for (i = 0; i < count / 3; i++) {
1561  uint32_t p = 0xFF000000;
1562  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1563  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1564  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1565  s->palette[i] = p;
1566  }
1567  s->palette_is_set = 1;
1568  break;
1569  }
1570  case TIFF_PLANAR:
1571  s->planar = value == 2;
1572  break;
1573  case TIFF_YCBCR_SUBSAMPLING:
1574  if (count != 2) {
1575  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1576  return AVERROR_INVALIDDATA;
1577  }
1578  for (i = 0; i < count; i++) {
1579  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1580  if (s->subsampling[i] <= 0) {
1581  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1582  s->subsampling[i] = 1;
1583  return AVERROR_INVALIDDATA;
1584  }
1585  }
1586  break;
1587  case TIFF_T4OPTIONS:
1588  if (s->compr == TIFF_G3)
1589  s->fax_opts = value;
1590  break;
1591  case TIFF_T6OPTIONS:
1592  if (s->compr == TIFF_G4)
1593  s->fax_opts = value;
1594  break;
1595 #define ADD_METADATA(count, name, sep)\
1596  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1597  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1598  goto end;\
1599  }
1601  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1602  break;
1604  ADD_METADATA(count, "ModelTransformationTag", NULL);
1605  break;
1606  case TIFF_MODEL_TIEPOINT:
1607  ADD_METADATA(count, "ModelTiepointTag", NULL);
1608  break;
1609  case TIFF_GEO_KEY_DIRECTORY:
1610  if (s->geotag_count) {
1611  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1612  return AVERROR_INVALIDDATA;
1613  }
1614  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1615  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1616  s->geotag_count = ff_tget_short(&s->gb, s->le);
1617  if (s->geotag_count > count / 4 - 1) {
1618  s->geotag_count = count / 4 - 1;
1619  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1620  }
1621  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1622  || s->geotag_count == 0) {
1623  s->geotag_count = 0;
1624  return -1;
1625  }
1626  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1627  if (!s->geotags) {
1628  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1629  s->geotag_count = 0;
1630  goto end;
1631  }
1632  for (i = 0; i < s->geotag_count; i++) {
1633  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1634  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1635  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1636 
1637  if (!s->geotags[i].type)
1638  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1639  else
1640  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1641  }
1642  break;
1643  case TIFF_GEO_DOUBLE_PARAMS:
1644  if (count >= INT_MAX / sizeof(int64_t))
1645  return AVERROR_INVALIDDATA;
1646  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1647  return AVERROR_INVALIDDATA;
1648  dp = av_malloc_array(count, sizeof(double));
1649  if (!dp) {
1650  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1651  goto end;
1652  }
1653  for (i = 0; i < count; i++)
1654  dp[i] = ff_tget_double(&s->gb, s->le);
1655  for (i = 0; i < s->geotag_count; i++) {
1656  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1657  if (s->geotags[i].count == 0
1658  || s->geotags[i].offset + s->geotags[i].count > count) {
1659  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1660  } else if (s->geotags[i].val) {
1661  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1662  } else {
1663  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1664  if (!ap) {
1665  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1666  av_freep(&dp);
1667  return AVERROR(ENOMEM);
1668  }
1669  s->geotags[i].val = ap;
1670  }
1671  }
1672  }
1673  av_freep(&dp);
1674  break;
1675  case TIFF_GEO_ASCII_PARAMS:
1676  pos = bytestream2_tell(&s->gb);
1677  for (i = 0; i < s->geotag_count; i++) {
1678  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1679  if (s->geotags[i].count == 0
1680  || s->geotags[i].offset + s->geotags[i].count > count) {
1681  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1682  } else {
1683  char *ap;
1684 
1685  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1686  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1687  return AVERROR_INVALIDDATA;
1688  if (s->geotags[i].val)
1689  return AVERROR_INVALIDDATA;
1690  ap = av_malloc(s->geotags[i].count);
1691  if (!ap) {
1692  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1693  return AVERROR(ENOMEM);
1694  }
1695  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1696  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1697  s->geotags[i].val = ap;
1698  }
1699  }
1700  }
1701  break;
1702  case TIFF_ICC_PROFILE:
1703  gb_temp = s->gb;
1704  bytestream2_seek(&gb_temp, off, SEEK_SET);
1705 
1706  if (bytestream2_get_bytes_left(&gb_temp) < count)
1707  return AVERROR_INVALIDDATA;
1708 
1709  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
1710  if (!sd)
1711  return AVERROR(ENOMEM);
1712 
1713  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1714  break;
1715  case TIFF_ARTIST:
1716  ADD_METADATA(count, "artist", NULL);
1717  break;
1718  case TIFF_COPYRIGHT:
1719  ADD_METADATA(count, "copyright", NULL);
1720  break;
1721  case TIFF_DATE:
1722  ADD_METADATA(count, "date", NULL);
1723  break;
1724  case TIFF_DOCUMENT_NAME:
1725  ADD_METADATA(count, "document_name", NULL);
1726  break;
1727  case TIFF_HOST_COMPUTER:
1728  ADD_METADATA(count, "computer", NULL);
1729  break;
1731  ADD_METADATA(count, "description", NULL);
1732  break;
1733  case TIFF_MAKE:
1734  ADD_METADATA(count, "make", NULL);
1735  break;
1736  case TIFF_MODEL:
1737  ADD_METADATA(count, "model", NULL);
1738  break;
1739  case TIFF_PAGE_NAME:
1740  ADD_METADATA(count, "page_name", NULL);
1741  break;
1742  case TIFF_PAGE_NUMBER:
1743  ADD_METADATA(count, "page_number", " / ");
1744  // need to seek back to re-read the page number
1745  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1746  // read the page number
1747  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1748  // get back to where we were before the previous seek
1749  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1750  break;
1751  case TIFF_SOFTWARE_NAME:
1752  ADD_METADATA(count, "software", NULL);
1753  break;
1754  case DNG_VERSION:
1755  if (count == 4) {
1756  unsigned int ver[4];
1757  ver[0] = ff_tget(&s->gb, type, s->le);
1758  ver[1] = ff_tget(&s->gb, type, s->le);
1759  ver[2] = ff_tget(&s->gb, type, s->le);
1760  ver[3] = ff_tget(&s->gb, type, s->le);
1761 
1762  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1763  ver[0], ver[1], ver[2], ver[3]);
1764 
1765  tiff_set_type(s, TIFF_TYPE_DNG);
1766  }
1767  break;
1768  case DNG_ANALOG_BALANCE:
1769  if (type != TIFF_RATIONAL)
1770  break;
1771 
1772  for (int i = 0; i < 3; i++) {
1773  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1774  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1775  if (!value2) {
1776  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1777  value2 = 1;
1778  }
1779 
1780  s->analog_balance[i] = value / (float)value2;
1781  }
1782  break;
1783  case DNG_AS_SHOT_NEUTRAL:
1784  if (type != TIFF_RATIONAL)
1785  break;
1786 
1787  for (int i = 0; i < 3; i++) {
1788  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1789  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1790  if (!value2) {
1791  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1792  value2 = 1;
1793  }
1794 
1795  s->as_shot_neutral[i] = value / (float)value2;
1796  }
1797  break;
1798  case DNG_AS_SHOT_WHITE_XY:
1799  if (type != TIFF_RATIONAL)
1800  break;
1801 
1802  for (int i = 0; i < 2; i++) {
1803  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1804  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1805  if (!value2) {
1806  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1807  value2 = 1;
1808  }
1809 
1810  s->as_shot_white[i] = value / (float)value2;
1811  }
1812  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1813  for (int i = 0; i < 3; i++) {
1814  s->as_shot_white[i] /= d65_white[i];
1815  }
1816  break;
1817  case DNG_COLOR_MATRIX1:
1818  case DNG_COLOR_MATRIX2:
1819  for (int i = 0; i < 3; i++) {
1820  for (int j = 0; j < 3; j++) {
1821  int value = ff_tget(&s->gb, TIFF_LONG, s->le);
1822  int value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1823  if (!value2) {
1824  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1825  value2 = 1;
1826  }
1827  s->color_matrix[i][j] = value / (float)value2;
1828  }
1829  s->use_color_matrix = 1;
1830  }
1831  break;
1832  case DNG_CAMERA_CALIBRATION1:
1833  case DNG_CAMERA_CALIBRATION2:
1834  for (int i = 0; i < 3; i++) {
1835  for (int j = 0; j < 3; j++) {
1836  int value = ff_tget(&s->gb, TIFF_LONG, s->le);
1837  int value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1838  if (!value2) {
1839  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1840  value2 = 1;
1841  }
1842  s->camera_calibration[i][j] = value / (float)value2;
1843  }
1844  }
1845  break;
1846  case CINEMADNG_TIME_CODES:
1847  case CINEMADNG_FRAME_RATE:
1848  case CINEMADNG_T_STOP:
1849  case CINEMADNG_REEL_NAME:
1850  case CINEMADNG_CAMERA_LABEL:
1851  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1852  break;
1853  default:
1854  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1855  av_log(s->avctx, AV_LOG_ERROR,
1856  "Unknown or unsupported tag %d/0x%0X\n",
1857  tag, tag);
1858  return AVERROR_INVALIDDATA;
1859  }
1860  }
1861 end:
1862  if (s->bpp > 128U) {
1863  av_log(s->avctx, AV_LOG_ERROR,
1864  "This format is not supported (bpp=%d, %d components)\n",
1865  s->bpp, count);
1866  s->bpp = 0;
1867  return AVERROR_INVALIDDATA;
1868  }
1869  bytestream2_seek(&s->gb, start, SEEK_SET);
1870  return 0;
1871 }
1872 
1873 static const float xyz2rgb[3][3] = {
1874  { 0.412453f, 0.357580f, 0.180423f },
1875  { 0.212671f, 0.715160f, 0.072169f },
1876  { 0.019334f, 0.119193f, 0.950227f },
1877 };
1878 
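/* Convert the camera-to-XYZ matrix into camera-to-RGB, normalize each row so
 * it sums to 1, and store the reciprocal row sums as per-channel
 * premultipliers. */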
1879 static void camera_xyz_coeff(TiffContext *s,
1880  float rgb2cam[3][4],
1881  double cam2xyz[4][3])
1882 {
1883  double cam2rgb[4][3], num;
1884  int i, j, k;
1885 
1886  for (i = 0; i < 3; i++) {
1887  for (j = 0; j < 3; j++) {
1888  cam2rgb[i][j] = 0.;
1889  for (k = 0; k < 3; k++)
1890  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1891  }
1892  }
1893 
1894  for (i = 0; i < 3; i++) {
1895  for (num = j = 0; j < 3; j++)
1896  num += cam2rgb[i][j];
1897  if (!num)
1898  num = 1;
1899  for (j = 0; j < 3; j++)
1900  cam2rgb[i][j] /= num;
1901  s->premultiply[i] = 1.f / num;
1902  }
1903 }
1904 
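/**
 * Main decode entry point: parse the TIFF header and IFD chain (following a
 * SubIFD or the requested page when needed), then decode the selected image
 * into the output frame.
 */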
1905 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1906  int *got_frame, AVPacket *avpkt)
1907 {
1908  TiffContext *const s = avctx->priv_data;
1909  unsigned off, last_off = 0;
1910  int le, ret, plane, planes;
1911  int i, j, entries, stride;
1912  unsigned soff, ssize;
1913  uint8_t *dst;
1914  GetByteContext stripsizes;
1915  GetByteContext stripdata;
1916  int retry_for_subifd, retry_for_page;
1917  int is_dng;
1918  int has_tile_bits, has_strip_bits;
1919 
1920  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1921 
1922  // parse image header
1923  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1924  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1925  return ret;
1926  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1927  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1928  return AVERROR_INVALIDDATA;
1929  }
1930  s->le = le;
1931  // TIFF_BPP is not a required tag and defaults to 1
1932 
1933  s->tiff_type = TIFF_TYPE_TIFF;
1934  s->use_color_matrix = 0;
1935 again:
1936  s->is_thumbnail = 0;
1937  s->bppcount = s->bpp = 1;
1938  s->photometric = TIFF_PHOTOMETRIC_NONE;
1939  s->compr = TIFF_RAW;
1940  s->fill_order = 0;
1941  s->white_level = 0;
1942  s->is_bayer = 0;
1943  s->is_tiled = 0;
1944  s->is_jpeg = 0;
1945  s->cur_page = 0;
1946  s->last_tag = 0;
1947 
1948  for (i = 0; i < 65536; i++)
1949  s->dng_lut[i] = i;
1950 
1951  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1952  s->black_level[i] = 0.f;
1953 
1954  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1955  s->as_shot_neutral[i] = 0.f;
1956 
1957  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1958  s->as_shot_white[i] = 1.f;
1959 
1960  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1961  s->analog_balance[i] = 1.f;
1962 
1963  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1964  s->premultiply[i] = 1.f;
1965 
1966  for (i = 0; i < 4; i++)
1967  for (j = 0; j < 4; j++)
1968  s->camera_calibration[i][j] = i == j;
1969 
1970  free_geotags(s);
1971 
1972  // Reset these offsets so we can tell if they were set this frame
1973  s->stripsizesoff = s->strippos = 0;
1974  /* parse image file directory */
1975  bytestream2_seek(&s->gb, off, SEEK_SET);
1976  entries = ff_tget_short(&s->gb, le);
1977  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1978  return AVERROR_INVALIDDATA;
1979  for (i = 0; i < entries; i++) {
1980  if ((ret = tiff_decode_tag(s, p)) < 0)
1981  return ret;
1982  }
1983 
1984  if (s->get_thumbnail && !s->is_thumbnail) {
1985  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1986  return AVERROR_EOF;
1987  }
1988 
1989  /** whether we should process this IFD's SubIFD */
1990  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1991  /** whether we should process this multi-page IFD's next page */
1992  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1993 
1994  if (retry_for_page) {
1995  // set offset to the next IFD
1996  off = ff_tget_long(&s->gb, le);
1997  } else if (retry_for_subifd) {
1998  // set offset to the SubIFD
1999  off = s->sub_ifd;
2000  }
2001 
2002  if (retry_for_subifd || retry_for_page) {
2003  if (!off) {
2004  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2005  return AVERROR_INVALIDDATA;
2006  }
2007  if (off <= last_off) {
2008  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2009  return AVERROR_INVALIDDATA;
2010  }
2011  last_off = off;
2012  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2013  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2014  return AVERROR_INVALIDDATA;
2015  }
2016  s->sub_ifd = 0;
2017  goto again;
2018  }
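 /* The offset of each newly selected IFD/SubIFD must strictly increase;
  * besides rejecting malformed files, this guards against spinning forever
  * on an IFD chain that loops back to an earlier offset. */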
2019 
2020  /* At this point we've decided on which (Sub)IFD to process */
2021 
2022  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2023 
2024  for (i = 0; i<s->geotag_count; i++) {
2025  const char *keyname = get_geokey_name(s->geotags[i].key);
2026  if (!keyname) {
2027  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2028  continue;
2029  }
2030  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2031  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2032  continue;
2033  }
2034  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
2035  if (ret<0) {
2036  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2037  return ret;
2038  }
2039  }
2040 
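 /* DNG-specific setup: fold AnalogBalance into CameraCalibration, optionally
  * combine it with ColorMatrix and the AsShotWhite point via camera_xyz_coeff(),
  * normalize the per-channel premultipliers, and validate bits per sample and
  * the Black/White levels. E.g. with bpp == 16 and bppcount == 1, bps is 16
  * and a missing WhiteLevel defaults to (1 << 16) - 1 = 65535. */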
2041  if (is_dng) {
2042  double cam2xyz[4][3];
2043  float cmatrix[3][4];
2044  float pmin = FLT_MAX;
2045  int bps;
2046 
2047  for (i = 0; i < 3; i++) {
2048  for (j = 0; j < 3; j++)
2049  s->camera_calibration[i][j] *= s->analog_balance[i];
2050  }
2051 
2052  if (!s->use_color_matrix) {
2053  for (i = 0; i < 3; i++) {
2054  if (s->camera_calibration[i][i])
2055  s->premultiply[i] /= s->camera_calibration[i][i];
2056  }
2057  } else {
2058  for (int c = 0; c < 3; c++) {
2059  for (i = 0; i < 3; i++) {
2060  cam2xyz[c][i] = 0.;
2061  for (j = 0; j < 3; j++)
2062  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2063  }
2064  }
2065 
2066  camera_xyz_coeff(s, cmatrix, cam2xyz);
2067  }
2068 
2069  for (int c = 0; c < 3; c++)
2070  pmin = fminf(pmin, s->premultiply[c]);
2071 
2072  for (int c = 0; c < 3; c++)
2073  s->premultiply[c] /= pmin;
2074 
2075  if (s->bpp % s->bppcount)
2076  return AVERROR_INVALIDDATA;
2077  bps = s->bpp / s->bppcount;
2078  if (bps < 8 || bps > 32)
2079  return AVERROR_INVALIDDATA;
2080 
2081  if (s->white_level == 0)
2082  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2083 
2084  if (s->white_level <= s->black_level[0]) {
2085  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2086  s->black_level[0], s->white_level);
2087  return AVERROR_INVALIDDATA;
2088  }
2089 
2090  if (s->planar)
2091  return AVERROR_PATCHWELCOME;
2092  }
2093 
2094  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2095  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2096  return AVERROR_INVALIDDATA;
2097  }
2098 
2099  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2100  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2101 
2102  if (has_tile_bits && has_strip_bits) {
2103  int tiled_dng = s->is_tiled && is_dng;
2104  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2105  if (!tiled_dng)
2106  return AVERROR_INVALIDDATA;
2107  }
2108 
2109  /* now we have the data and may start decoding */
2110  if ((ret = init_image(s, p)) <= 0)
2111  return ret;
2112 
2113  if (!s->is_tiled || has_strip_bits) {
2114  if (s->strips == 1 && !s->stripsize) {
2115  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2116  s->stripsize = avpkt->size - s->stripoff;
2117  }
2118 
2119  if (s->stripsizesoff) {
2120  if (s->stripsizesoff >= (unsigned)avpkt->size)
2121  return AVERROR_INVALIDDATA;
2122  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2123  avpkt->size - s->stripsizesoff);
2124  }
2125  if (s->strippos) {
2126  if (s->strippos >= (unsigned)avpkt->size)
2127  return AVERROR_INVALIDDATA;
2128  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2129  avpkt->size - s->strippos);
2130  }
2131 
2132  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2133  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2134  return AVERROR_INVALIDDATA;
2135  }
2136  }
2137 
2138  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2139  s->photometric == TIFF_PHOTOMETRIC_CFA) {
2140  p->color_trc = AVCOL_TRC_LINEAR;
2141  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
2142  p->color_trc = AVCOL_TRC_GAMMA22;
2143  }
2144 
2145  /* Handle DNG images with JPEG-compressed tiles */
2146 
2147  if (is_dng && s->is_tiled) {
2148  if (!s->is_jpeg) {
2149  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2150  return AVERROR_PATCHWELCOME;
2151  } else if (!s->is_bayer) {
2152  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2153  return AVERROR_PATCHWELCOME;
2154  } else {
2155  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2156  *got_frame = 1;
2157  return ret;
2158  }
2159  }
2160 
2161  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2162 
2163  planes = s->planar ? s->bppcount : 1;
2164  for (plane = 0; plane < planes; plane++) {
2165  uint8_t *five_planes = NULL;
2166  int remaining = avpkt->size;
2167  int decoded_height;
2168  stride = p->linesize[plane];
2169  dst = p->data[plane];
2170  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2171  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
2172  stride = stride * 5 / 4;
2173  five_planes =
2174  dst = av_malloc(stride * s->height);
2175  if (!dst)
2176  return AVERROR(ENOMEM);
2177  }
2178  for (i = 0; i < s->height; i += s->rps) {
2179  if (i)
2180  dst += s->rps * stride;
2181  if (s->stripsizesoff)
2182  ssize = ff_tget(&stripsizes, s->sstype, le);
2183  else
2184  ssize = s->stripsize;
2185 
2186  if (s->strippos)
2187  soff = ff_tget(&stripdata, s->sot, le);
2188  else
2189  soff = s->stripoff;
2190 
2191  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2192  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2193  av_freep(&five_planes);
2194  return AVERROR_INVALIDDATA;
2195  }
2196  remaining -= ssize;
2197  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2198  FFMIN(s->rps, s->height - i))) < 0) {
2199  if (avctx->err_recognition & AV_EF_EXPLODE) {
2200  av_freep(&five_planes);
2201  return ret;
2202  }
2203  break;
2204  }
2205  }
2206  decoded_height = FFMIN(i, s->height);
2207 
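 /* Predictor 2 (horizontal differencing): each sample was stored as the
  * difference from the sample one pixel to the left, so decoding adds the
  * previous pixel's sample back in; e.g. stored values 10, +2, +1 decode to
  * 10, 12, 13. The branches below do the same per byte or per 16-bit word,
  * matching the pixel format's endianness. */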
2208  if (s->predictor == 2) {
2209  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2210  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2211  return AVERROR_PATCHWELCOME;
2212  }
2213  dst = five_planes ? five_planes : p->data[plane];
2214  soff = s->bpp >> 3;
2215  if (s->planar)
2216  soff = FFMAX(soff / s->bppcount, 1);
2217  ssize = s->width * soff;
2218  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2219  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2220  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2221  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2222  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2223  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2224  for (i = 0; i < decoded_height; i++) {
2225  for (j = soff; j < ssize; j += 2)
2226  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2227  dst += stride;
2228  }
2229  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2230  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2231  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2232  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2233  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2234  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2235  for (i = 0; i < decoded_height; i++) {
2236  for (j = soff; j < ssize; j += 2)
2237  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2238  dst += stride;
2239  }
2240  } else {
2241  for (i = 0; i < decoded_height; i++) {
2242  for (j = soff; j < ssize; j++)
2243  dst[j] += dst[j - soff];
2244  dst += stride;
2245  }
2246  }
2247  }
2248 
2249  /* Floating point predictor
2250  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2251  if (s->predictor == 3) {
2252  int channels = s->bppcount;
2253  int group_size;
2254  uint8_t *tmpbuf;
2255  int bpc;
2256 
2257  dst = five_planes ? five_planes : p->data[plane];
2258  soff = s->bpp >> 3;
2259  if (s->planar) {
2260  soff = FFMAX(soff / s->bppcount, 1);
2261  channels = 1;
2262  }
2263  ssize = s->width * soff;
2264  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2265  group_size = s->width * channels;
2266 
2267  tmpbuf = av_malloc(ssize);
2268  if (!tmpbuf)
2269  return AVERROR(ENOMEM);
2270 
2271  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2272  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2273  for (i = 0; i < decoded_height; i++) {
2274  /* Copy first sample byte for each channel */
2275  for (j = 0; j < channels; j++)
2276  tmpbuf[j] = dst[j];
2277 
2278  /* Decode horizontal differences */
2279  for (j = channels; j < ssize; j++)
2280  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2281 
2282  /* Combine shuffled bytes from their separate groups. Each
2283  byte of every floating point value in a row of pixels is
2284  split and combined into separate groups. A group of all
2285  the sign/exponents bytes in the row and groups for each
2286  of the upper, mid, and lower mantissa bytes in the row. */
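 /* E.g. with 32-bit floats (bpc == 4) a row of group_size samples is stored
  * as four byte planes of group_size bytes each: sign/exponent first, then
  * upper, mid and low mantissa. Sample j takes one byte from each plane; the
  * (bpc - k - 1) index below emits them in little-endian order. */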
2287  for (j = 0; j < group_size; j++) {
2288  for (int k = 0; k < bpc; k++) {
2289  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2290  }
2291  }
2292  dst += stride;
2293  }
2294  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2295  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2296  /* Same as LE only the shuffle at the end is reversed */
2297  for (i = 0; i < decoded_height; i++) {
2298  for (j = 0; j < channels; j++)
2299  tmpbuf[j] = dst[j];
2300 
2301  for (j = channels; j < ssize; j++)
2302  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2303 
2304  for (j = 0; j < group_size; j++) {
2305  for (int k = 0; k < bpc; k++) {
2306  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2307  }
2308  }
2309  dst += stride;
2310  }
2311  } else {
2312  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2313  }
2314  av_free(tmpbuf);
2315  }
2316 
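 /* WhiteIsZero stores 0 as white, so invert every sample against the largest
  * code value ((1 << bpp) - 1 for PAL8, 255 otherwise). */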
2317  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2318  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2319  dst = p->data[plane];
2320  for (i = 0; i < s->height; i++) {
2321  for (j = 0; j < stride; j++)
2322  dst[j] = c - dst[j];
2323  dst += stride;
2324  }
2325  }
2326 
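 /* Separated (CMYK) data is folded down to RGB: each channel is the product
  * of the inverted ink and inverted key values, and "* 257 >> 16"
  * (resp. "* 65537 >> 32" for 16 bits) is a cheap approximation of dividing
  * by 255 (resp. 65535). */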
2327  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2328  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2329  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2330  uint8_t *src = five_planes ? five_planes : p->data[plane];
2331  dst = p->data[plane];
2332  for (i = 0; i < s->height; i++) {
2333  for (j = 0; j < s->width; j++) {
2334  int k = 255 - src[x * j + 3];
2335  int r = (255 - src[x * j ]) * k;
2336  int g = (255 - src[x * j + 1]) * k;
2337  int b = (255 - src[x * j + 2]) * k;
2338  dst[4 * j ] = r * 257 >> 16;
2339  dst[4 * j + 1] = g * 257 >> 16;
2340  dst[4 * j + 2] = b * 257 >> 16;
2341  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2342  }
2343  src += stride;
2344  dst += p->linesize[plane];
2345  }
2346  av_freep(&five_planes);
2347  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2348  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2349  dst = p->data[plane];
2350  for (i = 0; i < s->height; i++) {
2351  for (j = 0; j < s->width; j++) {
2352  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2353  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2354  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2355  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2356  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2357  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2358  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2359  AV_WB16(dst + 8 * j + 6, 65535);
2360  }
2361  dst += p->linesize[plane];
2362  }
2363  }
2364  }
2365 
2366  if (s->planar && s->bppcount > 2) {
2367  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2368  FFSWAP(int, p->linesize[0], p->linesize[2]);
2369  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2370  FFSWAP(int, p->linesize[0], p->linesize[1]);
2371  }
2372 
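 /* Plain (non-DNG) Bayer data with an explicit WhiteLevel is rescaled to the
  * full 16-bit range, e.g. a 12-bit sensor with white_level 4095 maps 4095
  * to 65535. */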
2373  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2374  uint16_t *dst = (uint16_t *)p->data[0];
2375  for (i = 0; i < s->height; i++) {
2376  for (j = 0; j < s->width; j++)
2377  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2378  dst += stride / 2;
2379  }
2380  }
2381 
2382  p->flags |= AV_FRAME_FLAG_KEY;
2383  *got_frame = 1;
2384 
2385  return avpkt->size;
2386 }
2387 
2388 static av_cold int tiff_init(AVCodecContext *avctx)
2389 {
2390  TiffContext *s = avctx->priv_data;
2391  const AVCodec *codec;
2392  int ret;
2393 
2394  s->width = 0;
2395  s->height = 0;
2396  s->subsampling[0] =
2397  s->subsampling[1] = 1;
2398  s->avctx = avctx;
2399  ff_lzw_decode_open(&s->lzw);
2400  if (!s->lzw)
2401  return AVERROR(ENOMEM);
2402  ff_ccitt_unpack_init();
2403 
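 /* DNG tiles may be JPEG-compressed, so a private MJPEG decoder context is
  * opened once here and reused for every tile by dng_decode_jpeg() /
  * dng_decode_tiles(). */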
2404  /* Allocate JPEG frame */
2405  s->jpgframe = av_frame_alloc();
2406  s->jpkt = av_packet_alloc();
2407  if (!s->jpgframe || !s->jpkt)
2408  return AVERROR(ENOMEM);
2409 
2410  /* Prepare everything needed for JPEG decoding */
2411  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2412  if (!codec)
2413  return AVERROR_BUG;
2414  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2415  if (!s->avctx_mjpeg)
2416  return AVERROR(ENOMEM);
2417  s->avctx_mjpeg->flags = avctx->flags;
2418  s->avctx_mjpeg->flags2 = avctx->flags2;
2419  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2420  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2421  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2422  ret = avcodec_open2(s->avctx_mjpeg, codec, NULL);
2423  if (ret < 0) {
2424  return ret;
2425  }
2426 
2427  return 0;
2428 }
2429 
2430 static av_cold int tiff_end(AVCodecContext *avctx)
2431 {
2432  TiffContext *const s = avctx->priv_data;
2433 
2434  free_geotags(s);
2435 
2436  ff_lzw_decode_close(&s->lzw);
2437  av_freep(&s->deinvert_buf);
2438  s->deinvert_buf_size = 0;
2439  av_freep(&s->yuv_line);
2440  s->yuv_line_size = 0;
2441  av_frame_free(&s->jpgframe);
2442  av_packet_free(&s->jpkt);
2443  avcodec_free_context(&s->avctx_mjpeg);
2444  return 0;
2445 }
2446 
2447 #define OFFSET(x) offsetof(TiffContext, x)
2448 static const AVOption tiff_options[] = {
2449  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2450  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2451  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2452  { NULL },
2453 };
2454 
2455 static const AVClass tiff_decoder_class = {
2456  .class_name = "TIFF decoder",
2457  .item_name = av_default_item_name,
2458  .option = tiff_options,
2459  .version = LIBAVUTIL_VERSION_INT,
2460 };
2461 
2462 const FFCodec ff_tiff_decoder = {
2463  .p.name = "tiff",
2464  CODEC_LONG_NAME("TIFF image"),
2465  .p.type = AVMEDIA_TYPE_VIDEO,
2466  .p.id = AV_CODEC_ID_TIFF,
2467  .priv_data_size = sizeof(TiffContext),
2468  .init = tiff_init,
2469  .close = tiff_end,
2470  FF_CODEC_DECODE_CB(decode_frame),
2471  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2472  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES |
2473  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2474  .p.priv_class = &tiff_decoder_class,
2475 };
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:69
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:660
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:423
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:208
TiffContext::gb
GetByteContext gb
Definition: tiff.c:58
AVCodec
AVCodec.
Definition: codec.h:187
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:150
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2462
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:115
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:133
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:158
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:284
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1252
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:110
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:566
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:201
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:134
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
GetByteContext
Definition: bytestream.h:33
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:164
get_geokey_val
static char * get_geokey_val(int key, int val)
Definition: tiff.c:182
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:99
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1879
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:579
TiffContext::strippos
int strippos
Definition: tiff.c:106
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:90
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:163
OFFSET
#define OFFSET(x)
Definition: tiff.c:2447
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1412
TiffContext::sot
int sot
Definition: tiff.c:105
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:250
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1517
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:130
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:158
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:195
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2430
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:341
w
uint8_t w
Definition: llviddspenc.c:38
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:111
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:136
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:338
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:92
AVPacket::data
uint8_t * data
Definition: packet.h:491
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:202
AVOption
AVOption.
Definition: opt.h:251
TIFF_LONG
@ TIFF_LONG
Definition: tiff_common.h:40
b
#define b
Definition: input.c:41
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:135
FFCodec
Definition: codec_internal.h:127
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:445
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:61
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:88
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:154
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:97
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:162
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:164
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:129
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:98
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:496
TiffGeoTagKeyName
Definition: tiff.h:223
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:193
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:137
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:149
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:114
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:290
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec caps_internal and use ff_thread_get_buffer() to allocate frames. Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:96
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2448
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:67
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:206
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:54
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:197
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:117
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:75
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:83
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:396
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:471
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:127
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:971
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:87
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:57
GetBitContext
Definition: get_bits.h:108
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:148
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:116
TiffContext::tile_length
int tile_length
Definition: tiff.c:112
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:71
val
static double val(void *priv, double ch)
Definition: aeval.c:78
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:390
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:93
AVCodecContext::dct_algo
int dct_algo
DCT algorithm, see FF_DCT_* below.
Definition: avcodec.h:1473
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:184
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:452
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:74
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:91
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:575
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:122
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:106
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
TIFF_SHORT
@ TIFF_SHORT
Definition: tiff_common.h:39
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
TiffGeoTag
Definition: tiff.h:215
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:69
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TiffContext::rps
int rps
Definition: tiff.c:104
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:48
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:95
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:94
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:122
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:121
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:206
TiffContext::stripsize
int stripsize
Definition: tiff.c:106
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:153
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:536
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:132
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:165
TiffContext::width
int width
Definition: tiff.c:70
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:278
g
const char * g
Definition: vf_curves.c:127
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:37
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:62
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
fminf
float fminf(float, float)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:713
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:118
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:146
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:652
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
TIFF_STRING
@ TIFF_STRING
Definition: tiff_common.h:38
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:204
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:87
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:165
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:66
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:56
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1959
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:86
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
key
const char * key
Definition: hwcontext_opencl.c:174
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:63
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:75
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:76
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
frame
static AVFrame * frame
Definition: demux_decode.c:54
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:173
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:279
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:493
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:309
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:122
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:121
TiffContext::height
int height
Definition: tiff.c:70
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:66
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:187
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:133
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2388
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:92
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:44
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:340
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:199
TiffContext
Definition: tiff.c:55
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:403
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:83
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:57
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:168
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:203
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:196
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:65
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:196
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:93
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:200
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:96
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:495
MJpegDecodeContext
Definition: mjpegdec.h:54
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:79
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:128
TIFF_BYTE
@ TIFF_BYTE
Definition: tiff_common.h:37
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:76
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:120
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:60
TIFF_SRATIONAL
@ TIFF_SRATIONAL
Definition: tiff_common.h:46
TIFF_G3
@ TIFF_G3
Definition: tiff.h:131
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:49
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:82
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:72
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:975
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:253
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:417
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:528
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:442
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:492
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:43
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:262
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:90
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:111
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
TIFF_DOUBLE
@ TIFF_DOUBLE
Definition: tiff_common.h:48
bps
unsigned bps
Definition: movenc.c:1738
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:202
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:99
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1873
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:71
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:143
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:62
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:55
TiffContext::fill_order
int fill_order
Definition: tiff.c:81
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:95
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:81
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:58
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:410
height
#define height
TiffContext::white_level
unsigned white_level
Definition: tiff.c:98
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:106
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:256
TiffContext::planar
int planar
Definition: tiff.c:77
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:52
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:50
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:168
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2455
RET_GEOKEY
#define RET_GEOKEY(TYPE, array, element)
Definition: tiff.c:143
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:107
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:70
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:205
TiffContext::le
int le
Definition: tiff.c:74
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:123
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:709
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:78
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:73
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1905
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:203
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:50
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:194
TiffContext::tile_width
int tile_width
Definition: tiff.c:112
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:79
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:71
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:458
TiffContext::res
uint32_t res[4]
Definition: tiff.c:82
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:94
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:65
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:280
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:39
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
planes
static const struct @363 planes[]
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:282
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:75
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1486
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:41
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:104
TiffContext::stripoff
int stripoff
Definition: tiff.c:106
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:339
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:192
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:91
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:59
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:80
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
pv
#define pv
Definition: regdef.h:60
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:207
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
tag
uint32_t tag
Definition: movenc.c:1737
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:77
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:108
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:73
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:51
d65_white
static const float d65_white[3]
Definition: tiff.c:125
pos
unsigned int pos
Definition: spdifenc.c:413
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:148
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:53
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:56
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:191
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:84
AVCodecContext
main external API structure.
Definition: avcodec.h:441
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:420
TiffContext::sstype
int sstype
Definition: tiff.c:104
again
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining again
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:78
TIFF_RATIONAL
@ TIFF_RATIONAL
Definition: tiff_common.h:41
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:708
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:107
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1233
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:139
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:631
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:97
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:124
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:110
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:418
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:64
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
av_clip_uint16
#define av_clip_uint16
Definition: common.h:108
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:96
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:112
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:421
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1044
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:132
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:138
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:195
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
AVPacket
This structure stores compressed data.
Definition: packet.h:468
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:84
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
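av_dict_set() is public API; the snippet below is a small illustrative helper (the function name and the key/value strings are hypothetical) showing how a string tag can be attached to a frame's metadata dictionary, which is what the TIFF decoder's metadata handling ultimately does.

#include "libavutil/dict.h"
#include "libavutil/frame.h"

/* Store key/value in the frame's metadata dictionary.
 * With flags == 0 both strings are copied, and an existing
 * entry with the same key is overwritten. */
static int tag_frame(AVFrame *frame, const char *key, const char *value)
{
    return av_dict_set(&frame->metadata, key, value, 0);
}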
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:317
src
pixel *src (the INIT_CLIP macro shares the source line)
Definition: h264pred_template.c:418
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:742
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:109
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:111
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:144
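A minimal sketch of how such a helper is used (internal API, assumed to be compiled inside libavcodec; the wrapper name and the ", " separator are assumptions): it converts count doubles read from the current tag payload into a single string entry stored under name.

#include "libavutil/dict.h"
#include "bytestream.h"
#include "tiff_common.h"

static int add_double_array(GetByteContext *gb, int le, int count,
                            const char *name, AVDictionary **metadata)
{
    /* reads `count` doubles from gb and stores them as one
     * comma-separated string under the key `name` */
    return ff_tadd_doubles_metadata(count, name, ", ", gb, le, metadata);
}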
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:61
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:63
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:276
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:128
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value which typically indicates the size in bytes of each picture line.
Definition: frame.h:385
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:152
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:102
h
h
Definition: vp9dsp_template.c:2038
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:53
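type_sizes[] maps a TIFF field-type value to its size in bytes. Below is a hedged sketch of the kind of bounds-checked payload-size computation a tag parser performs with it; the helper name is illustrative and the sketch assumes compilation inside libavcodec.

#include "libavutil/macros.h"
#include "tiff_common.h"

/* Byte size of a tag's payload: value count times the per-type size.
 * Out-of-range type values are rejected before indexing the table. */
static unsigned tag_payload_size(unsigned type, unsigned count)
{
    if (type >= FF_ARRAY_ELEMS(type_sizes))
        return 0;
    return count * type_sizes[type];
}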
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:450
TiffContext::predictor
int predictor
Definition: tiff.c:80
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:494
int
int
Definition: ffmpeg_filter.c:368
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a value of the given TIFF field type from the bytestream using given endianness.
Definition: tiff_common.c:63
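A sketch of where ff_tget fits: after the tag id, field type and value count of an IFD entry have been read, it fetches a value whose width depends on that type. The helper below is illustrative only, assumes an inline scalar value, and relies on FFmpeg-internal headers.

#include "libavutil/log.h"
#include "bytestream.h"
#include "tiff_common.h"

static void dump_inline_ifd_entry(GetByteContext *gb, int le)
{
    unsigned tag   = ff_tget_short(gb, le); /* tag id      */
    unsigned type  = ff_tget_short(gb, le); /* field type  */
    unsigned count = ff_tget_long(gb, le);  /* value count */

    if (count == 1) /* value stored inline in the entry */
        av_log(NULL, AV_LOG_DEBUG, "tag %u = %u\n", tag, ff_tget(gb, type, le));
}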
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:198
TiffContext::strips
int strips
Definition: tiff.c:104
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:160
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:121
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:101
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:281
line
From the swscale documentation: slices are consecutive, non-overlapping rectangles of the image; special converters are generally unscaled converters of common formats, while on the scaled path, for each output line, the vertical scaler pulls lines from a ring buffer and has them produced first when the ring buffer does not contain the wanted line.
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:119
AV_RB16
AV_RB16(p): reads a 16-bit unsigned value from p in big-endian byte order (expanded from the bytestream/intreadwrite macro templates).
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:113
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:144