FFmpeg
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
56  AVCodecContext *avctx;
57  GetByteContext gb;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVPacket *jpkt; // encoded JPEG tile
62  AVFrame *jpgframe; // decoded JPEG tile
63 
64  int get_subimage;
65  uint16_t get_page;
66  int get_thumbnail;
67 
68  enum TiffType tiff_type;
69  int width, height;
70  unsigned int bpp, bppcount;
71  uint32_t palette[256];
72  int palette_is_set;
73  int le;
74  enum TiffCompr compr;
75  enum TiffPhotometric photometric;
76  int planar;
77  int subsampling[2];
78  int fax_opts;
79  int predictor;
80  int fill_order;
81  uint32_t res[4];
82  int is_thumbnail;
83  unsigned last_tag;
84 
85  int is_bayer;
86  uint8_t pattern[4];
87  unsigned black_level;
88  unsigned white_level;
89  uint16_t dng_lut[65536];
90 
91  uint32_t sub_ifd;
92  uint16_t cur_page;
93 
94  int strips, rps, sstype;
95  int sot;
96  int stripsizesoff, stripsize, stripoff, strippos;
97  LZWState *lzw;
98 
99  /* Tile support */
100  int is_tiled;
101  int tile_byte_counts_offset, tile_offsets_offset;
102  int tile_width, tile_length;
103  int tile_count;
104 
105  int is_jpeg;
106 
107  uint8_t *deinvert_buf;
108  int deinvert_buf_size;
109  uint8_t *yuv_line;
110  unsigned int yuv_line_size;
111 
112  int geotag_count;
113  TiffGeoTag *geotags;
114 } TiffContext;
115 
116 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
117  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
118  s->tiff_type = tiff_type;
119 }
120 
121 static void free_geotags(TiffContext *const s)
122 {
123  int i;
124  for (i = 0; i < s->geotag_count; i++) {
125  if (s->geotags[i].val)
126  av_freep(&s->geotags[i].val);
127  }
128  av_freep(&s->geotags);
129  s->geotag_count = 0;
130 }
131 
132 #define RET_GEOKEY(TYPE, array, element)\
133  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
135  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
136 
137 static const char *get_geokey_name(int key)
138 {
139  RET_GEOKEY(VERT, vert, name);
140  RET_GEOKEY(PROJ, proj, name);
141  RET_GEOKEY(GEOG, geog, name);
142  RET_GEOKEY(CONF, conf, name);
143 
144  return NULL;
145 }
146 
147 static int get_geokey_type(int key)
148 {
149  RET_GEOKEY(VERT, vert, type);
150  RET_GEOKEY(PROJ, proj, type);
151  RET_GEOKEY(GEOG, geog, type);
152  RET_GEOKEY(CONF, conf, type);
153 
154  return AVERROR_INVALIDDATA;
155 }
156 
157 static int cmp_id_key(const void *id, const void *k)
158 {
159  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
160 }
161 
162 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
163 {
164  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
165  if(r)
166  return r->name;
167 
168  return NULL;
169 }
170 
171 static char *get_geokey_val(int key, int val)
172 {
173  char *ap;
174 
175  if (val == TIFF_GEO_KEY_UNDEFINED)
176  return av_strdup("undefined");
177  if (val == TIFF_GEO_KEY_USER_DEFINED)
178  return av_strdup("User-Defined");
179 
180 #define RET_GEOKEY_VAL(TYPE, array)\
181  if (val >= TIFF_##TYPE##_OFFSET &&\
182  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
183  return av_strdup(tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
184 
185  switch (key) {
186  case TIFF_GT_MODEL_TYPE_GEOKEY:
187  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
188  break;
189  case TIFF_GT_RASTER_TYPE_GEOKEY:
190  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
191  break;
192  case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
193  case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
194  case TIFF_VERTICAL_UNITS_GEOKEY:
195  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
196  break;
197  case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
198  case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
199  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
200  break;
201  case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
202  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
203  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
204  break;
205  case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
206  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
207  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
208  break;
209  case TIFF_GEOG_ELLIPSOID_GEOKEY:
210  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
211  break;
212  case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
213  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
214  break;
215  case TIFF_PROJECTED_CS_TYPE_GEOKEY:
216  ap = av_strdup(search_keyval(tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(tiff_proj_cs_type_codes), val));
217  if(ap) return ap;
218  break;
219  case TIFF_PROJECTION_GEOKEY:
220  ap = av_strdup(search_keyval(tiff_projection_codes, FF_ARRAY_ELEMS(tiff_projection_codes), val));
221  if(ap) return ap;
222  break;
223  case TIFF_PROJ_COORD_TRANS_GEOKEY:
224  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
225  break;
226  case TIFF_VERT_CS_TYPE_GEOKEY:
227  RET_GEOKEY_VAL(VERT_CS, vert_cs);
228  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
229  break;
230 
231  }
232 
233  ap = av_malloc(14);
234  if (ap)
235  snprintf(ap, 14, "Unknown-%d", val);
236  return ap;
237 }
238 
239 static char *doubles2str(double *dp, int count, const char *sep)
240 {
241  int i;
242  char *ap, *ap0;
243  uint64_t component_len;
244  if (!sep) sep = ", ";
245  component_len = 24LL + strlen(sep);
246  if (count >= (INT_MAX - 1)/component_len)
247  return NULL;
248  ap = av_malloc(component_len * count + 1);
249  if (!ap)
250  return NULL;
251  ap0 = ap;
252  ap[0] = '\0';
253  for (i = 0; i < count; i++) {
254  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255  if(l >= component_len) {
256  av_free(ap0);
257  return NULL;
258  }
259  ap += l;
260  }
261  ap0[strlen(ap0) - strlen(sep)] = '\0';
262  return ap0;
263 }
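/* Sizing note for doubles2str() above: a double printed with "%.15g" takes at
 * most roughly 24 characters (e.g. "-2.2250738585072e-308"), hence the
 * per-component budget of component_len = 24 + strlen(sep) bytes; the final
 * separator is trimmed off before the string is returned. */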
264 
265 static int add_metadata(int count, int type,
266  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
267 {
268  switch(type) {
269  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
270  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
271  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
272  default : return AVERROR_INVALIDDATA;
273  };
274 }
275 
276 /**
277  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
278  */
279 static uint16_t av_always_inline dng_process_color16(uint16_t value,
280  const uint16_t *lut,
281  uint16_t black_level,
282  float scale_factor)
283 {
284  float value_norm;
285 
286  // Apply the linearization lookup table, if present
287  if (lut)
288  value = lut[value];
289 
290  // Black level subtraction
291  value = av_clip_uint16_c((unsigned)value - black_level);
292 
293  // Color scaling
294  value_norm = (float)value * scale_factor;
295 
296  value = av_clip_uint16_c(value_norm * 65535);
297 
298  return value;
299 }
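/* Worked example for dng_process_color16() (illustrative numbers only): with
 * black_level = 256 and white_level = 4095, dng_blit() passes
 * scale_factor = 1 / (4095 - 256). A stored value of 2048 with no LUT becomes
 * 2048 - 256 = 1792, is normalized to 1792 / 3839 ~= 0.467, and is written out
 * as roughly 0.467 * 65535 ~= 30590 in the 16-bit result. */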
300 
301 static uint16_t av_always_inline dng_process_color8(uint16_t value,
302  const uint16_t *lut,
303  uint16_t black_level,
304  float scale_factor)
305 {
306  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
307 }
308 
309 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
310  const uint8_t *src, int src_stride, int width, int height,
311  int is_single_comp, int is_u16)
312 {
313  int line, col;
314  float scale_factor;
315 
316  scale_factor = 1.0f / (s->white_level - s->black_level);
317 
318  if (is_single_comp) {
319  if (!is_u16)
320  return; /* <= 8bpp unsupported */
321 
322  /* The input image is double the width and half the height of the output; each input row
323  holds two consecutive output rows (split down the middle). */
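 /* For illustration (hypothetical tile size): a tile that should come out as
 64x32 arrives from the MJPEG decoder as a 128x16 single-component image;
 samples 0..63 of input row r become output row 2*r and samples 64..127
 become output row 2*r + 1. */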
324  for (line = 0; line < height / 2; line++) {
325  uint16_t *dst_u16 = (uint16_t *)dst;
326  uint16_t *src_u16 = (uint16_t *)src;
327 
328  /* Blit the first half of the input row to the current output row */
329  for (col = 0; col < width; col++)
330  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
331 
332  /* Advance the destination pointer by a row (source pointer remains in the same place) */
333  dst += dst_stride * sizeof(uint16_t);
334  dst_u16 = (uint16_t *)dst;
335 
336  /* Blit the second half of the input row to the next output row */
337  for (col = 0; col < width; col++)
338  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
339 
340  dst += dst_stride * sizeof(uint16_t);
341  src += src_stride * sizeof(uint16_t);
342  }
343  } else {
344  /* Input and output image are the same size and the MJpeg decoder has done per-component
345  deinterleaving, so blitting here is straightforward. */
346  if (is_u16) {
347  for (line = 0; line < height; line++) {
348  uint16_t *dst_u16 = (uint16_t *)dst;
349  uint16_t *src_u16 = (uint16_t *)src;
350 
351  for (col = 0; col < width; col++)
352  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
353 
354  dst += dst_stride * sizeof(uint16_t);
355  src += src_stride * sizeof(uint16_t);
356  }
357  } else {
358  for (line = 0; line < height; line++) {
359  uint8_t *dst_u8 = dst;
360  const uint8_t *src_u8 = src;
361 
362  for (col = 0; col < width; col++)
363  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut, s->black_level, scale_factor);
364 
365  dst += dst_stride;
366  src += src_stride;
367  }
368  }
369  }
370 }
371 
372 static void av_always_inline horizontal_fill(TiffContext *s,
373  unsigned int bpp, uint8_t* dst,
374  int usePtr, const uint8_t *src,
375  uint8_t c, int width, int offset)
376 {
377  switch (bpp) {
378  case 1:
379  while (--width >= 0) {
380  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
381  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
382  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
383  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
384  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
385  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
386  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
387  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
388  }
389  break;
390  case 2:
391  while (--width >= 0) {
392  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
393  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
394  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
395  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
396  }
397  break;
398  case 4:
399  while (--width >= 0) {
400  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
401  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
402  }
403  break;
404  case 10:
405  case 12:
406  case 14: {
407  uint16_t *dst16 = (uint16_t *)dst;
408  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
409  uint8_t shift = is_dng ? 0 : 16 - bpp;
410  GetBitContext gb;
411 
412  init_get_bits8(&gb, src, width);
413  for (int i = 0; i < s->width; i++) {
414  dst16[i] = get_bits(&gb, bpp) << shift;
415  }
416  }
417  break;
418  default:
419  if (usePtr) {
420  memcpy(dst + offset, src, width);
421  } else {
422  memset(dst + offset, c, width);
423  }
424  }
425 }
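/* Example of the sub-byte unpacking in horizontal_fill() (illustrative): with
 * bpp = 4 the source byte 0xA5 expands to the two samples 0x0A and 0x05; with
 * bpp = 1 the same byte expands to 1,0,1,0,0,1,0,1 (most significant bit
 * first). For 10/12/14 bpp the values are read with a bit reader and, outside
 * of DNG, shifted left by 16 - bpp so they fill the 16-bit output range. */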
426 
427 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
428 {
429  int i;
430 
431  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
432  if (!s->deinvert_buf)
433  return AVERROR(ENOMEM);
434  for (i = 0; i < size; i++)
435  s->deinvert_buf[i] = ff_reverse[src[i]];
436 
437  return 0;
438 }
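/* deinvert_buffer() serves FillOrder = 2 files, where bits inside each byte are
 * stored least-significant-bit first; ff_reverse[] mirrors every byte, so for
 * example 0x01 becomes 0x80 and 0xB0 becomes 0x0D. */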
439 
440 static void unpack_gray(TiffContext *s, AVFrame *p,
441  const uint8_t *src, int lnum, int width, int bpp)
442 {
443  GetBitContext gb;
444  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
445 
446  init_get_bits8(&gb, src, width);
447 
448  for (int i = 0; i < s->width; i++) {
449  dst[i] = get_bits(&gb, bpp);
450  }
451 }
452 
453 static void unpack_yuv(TiffContext *s, AVFrame *p,
454  const uint8_t *src, int lnum)
455 {
456  int i, j, k;
457  int w = (s->width - 1) / s->subsampling[0] + 1;
458  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
459  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
460  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
461  for (i = 0; i < w; i++) {
462  for (j = 0; j < s->subsampling[1]; j++)
463  for (k = 0; k < s->subsampling[0]; k++)
464  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
465  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
466  *pu++ = *src++;
467  *pv++ = *src++;
468  }
469  }else{
470  for (i = 0; i < w; i++) {
471  for (j = 0; j < s->subsampling[1]; j++)
472  for (k = 0; k < s->subsampling[0]; k++)
473  p->data[0][(lnum + j) * p->linesize[0] +
474  i * s->subsampling[0] + k] = *src++;
475  *pu++ = *src++;
476  *pv++ = *src++;
477  }
478  }
479 }
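/* Input layout assumed by unpack_yuv(): all luma samples of one chroma
 * macro-pixel come first, followed by one Cb and one Cr sample. For example,
 * with 2x2 subsampling a row of the strip is stored as
 * Y00 Y01 Y10 Y11 Cb Cr, Y02 Y03 Y12 Y13 Cb Cr, ... */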
480 
481 #if CONFIG_ZLIB
482 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
483  int size)
484 {
485  z_stream zstream = { 0 };
486  int zret;
487 
488  zstream.next_in = src;
489  zstream.avail_in = size;
490  zstream.next_out = dst;
491  zstream.avail_out = *len;
492  zret = inflateInit(&zstream);
493  if (zret != Z_OK) {
494  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
495  return zret;
496  }
497  zret = inflate(&zstream, Z_SYNC_FLUSH);
498  inflateEnd(&zstream);
499  *len = zstream.total_out;
500  return zret == Z_STREAM_END ? Z_OK : zret;
501 }
502 
503 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
504  const uint8_t *src, int size, int width, int lines,
505  int strip_start, int is_yuv)
506 {
507  uint8_t *zbuf;
508  unsigned long outlen;
509  int ret, line;
510  outlen = width * lines;
511  zbuf = av_malloc(outlen);
512  if (!zbuf)
513  return AVERROR(ENOMEM);
514  if (s->fill_order) {
515  if ((ret = deinvert_buffer(s, src, size)) < 0) {
516  av_free(zbuf);
517  return ret;
518  }
519  src = s->deinvert_buf;
520  }
521  ret = tiff_uncompress(zbuf, &outlen, src, size);
522  if (ret != Z_OK) {
523  av_log(s->avctx, AV_LOG_ERROR,
524  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
525  (unsigned long)width * lines, ret);
526  av_free(zbuf);
527  return AVERROR_UNKNOWN;
528  }
529  src = zbuf;
530  for (line = 0; line < lines; line++) {
531  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
532  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
533  } else {
534  memcpy(dst, src, width);
535  }
536  if (is_yuv) {
537  unpack_yuv(s, p, dst, strip_start + line);
538  line += s->subsampling[1] - 1;
539  }
540  dst += stride;
541  src += width;
542  }
543  av_free(zbuf);
544  return 0;
545 }
546 #endif
547 
548 #if CONFIG_LZMA
549 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
550  int size)
551 {
552  lzma_stream stream = LZMA_STREAM_INIT;
553  lzma_ret ret;
554 
555  stream.next_in = (uint8_t *)src;
556  stream.avail_in = size;
557  stream.next_out = dst;
558  stream.avail_out = *len;
559  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
560  if (ret != LZMA_OK) {
561  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
562  return ret;
563  }
564  ret = lzma_code(&stream, LZMA_RUN);
565  lzma_end(&stream);
566  *len = stream.total_out;
567  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
568 }
569 
570 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
571  const uint8_t *src, int size, int width, int lines,
572  int strip_start, int is_yuv)
573 {
574  uint64_t outlen = width * (uint64_t)lines;
575  int ret, line;
576  uint8_t *buf = av_malloc(outlen);
577  if (!buf)
578  return AVERROR(ENOMEM);
579  if (s->fill_order) {
580  if ((ret = deinvert_buffer(s, src, size)) < 0) {
581  av_free(buf);
582  return ret;
583  }
584  src = s->deinvert_buf;
585  }
586  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
587  if (ret != LZMA_OK) {
588  av_log(s->avctx, AV_LOG_ERROR,
589  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
590  (uint64_t)width * lines, ret);
591  av_free(buf);
592  return AVERROR_UNKNOWN;
593  }
594  src = buf;
595  for (line = 0; line < lines; line++) {
596  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
597  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
598  } else {
599  memcpy(dst, src, width);
600  }
601  if (is_yuv) {
602  unpack_yuv(s, p, dst, strip_start + line);
603  line += s->subsampling[1] - 1;
604  }
605  dst += stride;
606  src += width;
607  }
608  av_free(buf);
609  return 0;
610 }
611 #endif
612 
613 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
614  const uint8_t *src, int size, int width, int lines)
615 {
616  int line;
617  int ret;
618 
619  if (s->fill_order) {
620  if ((ret = deinvert_buffer(s, src, size)) < 0)
621  return ret;
622  src = s->deinvert_buf;
623  }
624  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
625  s->compr, s->fax_opts);
626  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
627  for (line = 0; line < lines; line++) {
628  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
629  dst += stride;
630  }
631  return ret;
632 }
633 
634 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
635  int tile_byte_count, int dst_x, int dst_y, int w, int h)
636 {
637  TiffContext *s = avctx->priv_data;
638  uint8_t *dst_data, *src_data;
639  uint32_t dst_offset; /* offset from dst buffer in pixels */
640  int is_single_comp, is_u16, pixel_size;
641  int ret;
642 
643  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
644  return AVERROR_INVALIDDATA;
645 
646  /* Prepare a packet and send to the MJPEG decoder */
647  av_packet_unref(s->jpkt);
648  s->jpkt->data = (uint8_t*)s->gb.buffer;
649  s->jpkt->size = tile_byte_count;
650 
651  if (s->is_bayer) {
652  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
653  /* We have to set this information here: there is no way to tell from the JPEG data alone
654  whether it is a DNG-embedded image, and we need that information when decoding it. */
655  mjpegdecctx->bayer = 1;
656  }
657 
658  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
659  if (ret < 0) {
660  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
661  return ret;
662  }
663 
664  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
665  if (ret < 0) {
666  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
667 
668  /* Normally skip the failed tile; treat it as an error only if AV_EF_EXPLODE is set */
669  if (avctx->err_recognition & AV_EF_EXPLODE)
670  return AVERROR_INVALIDDATA;
671  else
672  return 0;
673  }
674 
675  is_u16 = (s->bpp > 8);
676 
677  /* Copy the decoded tile's pixels from 'jpgframe' to 'frame' (the final buffer) */
678 
679  if (s->jpgframe->width != s->avctx_mjpeg->width ||
680  s->jpgframe->height != s->avctx_mjpeg->height ||
681  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
682  return AVERROR_INVALIDDATA;
683 
684  /* See dng_blit for explanation */
685  if (s->avctx_mjpeg->width == w * 2 &&
686  s->avctx_mjpeg->height == h / 2 &&
687  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
688  is_single_comp = 1;
689  } else if (s->avctx_mjpeg->width >= w &&
690  s->avctx_mjpeg->height >= h &&
691  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
692  ) {
693  is_single_comp = 0;
694  } else
695  return AVERROR_INVALIDDATA;
696 
697  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
698 
699  if (is_single_comp && !is_u16) {
700  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
701  av_frame_unref(s->jpgframe);
702  return AVERROR_PATCHWELCOME;
703  }
704 
705  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
706  dst_data = frame->data[0] + dst_offset * pixel_size;
707  src_data = s->jpgframe->data[0];
708 
709  dng_blit(s,
710  dst_data,
711  frame->linesize[0] / pixel_size,
712  src_data,
713  s->jpgframe->linesize[0] / pixel_size,
714  w,
715  h,
716  is_single_comp,
717  is_u16);
718 
719  av_frame_unref(s->jpgframe);
720 
721  return 0;
722 }
723 
724 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
725 {
726  TiffContext *s = avctx->priv_data;
727 
728  s->jpgframe->width = s->width;
729  s->jpgframe->height = s->height;
730 
731  s->avctx_mjpeg->width = s->width;
732  s->avctx_mjpeg->height = s->height;
733 
734  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
735 }
736 
737 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
738  const uint8_t *src, int size, int strip_start, int lines)
739 {
740  PutByteContext pb;
741  int c, line, pixels, code, ret;
742  const uint8_t *ssrc = src;
743  int width = ((s->width * s->bpp) + 7) >> 3;
744  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
745  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
746  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
747  desc->nb_components >= 3;
748  int is_dng;
749 
750  if (s->planar)
751  width /= s->bppcount;
752 
753  if (size <= 0)
754  return AVERROR_INVALIDDATA;
755 
756  if (is_yuv) {
757  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
758  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
759  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
760  if (s->yuv_line == NULL) {
761  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
762  return AVERROR(ENOMEM);
763  }
764  dst = s->yuv_line;
765  stride = 0;
766 
767  width = (s->width - 1) / s->subsampling[0] + 1;
768  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
769  av_assert0(width <= bytes_per_row);
770  av_assert0(s->bpp == 24);
771  }
772  if (s->is_bayer) {
773  av_assert0(width == (s->bpp * s->width + 7) >> 3);
774  }
775  if (p->format == AV_PIX_FMT_GRAY12) {
776  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
777  if (s->yuv_line == NULL) {
778  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
779  return AVERROR(ENOMEM);
780  }
781  dst = s->yuv_line;
782  stride = 0;
783  }
784 
785  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
786 #if CONFIG_ZLIB
787  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
788  strip_start, is_yuv);
789 #else
790  av_log(s->avctx, AV_LOG_ERROR,
791  "zlib support not enabled, "
792  "deflate compression not supported\n");
793  return AVERROR(ENOSYS);
794 #endif
795  }
796  if (s->compr == TIFF_LZMA) {
797 #if CONFIG_LZMA
798  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
799  strip_start, is_yuv);
800 #else
801  av_log(s->avctx, AV_LOG_ERROR,
802  "LZMA support not enabled\n");
803  return AVERROR(ENOSYS);
804 #endif
805  }
806  if (s->compr == TIFF_LZW) {
807  if (s->fill_order) {
808  if ((ret = deinvert_buffer(s, src, size)) < 0)
809  return ret;
810  ssrc = src = s->deinvert_buf;
811  }
812  if (size > 1 && !src[0] && (src[1]&1)) {
813  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
814  }
815  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
816  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
817  return ret;
818  }
819  for (line = 0; line < lines; line++) {
820  pixels = ff_lzw_decode(s->lzw, dst, width);
821  if (pixels < width) {
822  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
823  pixels, width);
824  return AVERROR_INVALIDDATA;
825  }
826  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
827  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
828  if (is_yuv) {
829  unpack_yuv(s, p, dst, strip_start + line);
830  line += s->subsampling[1] - 1;
831  } else if (p->format == AV_PIX_FMT_GRAY12) {
832  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
833  }
834  dst += stride;
835  }
836  return 0;
837  }
838  if (s->compr == TIFF_CCITT_RLE ||
839  s->compr == TIFF_G3 ||
840  s->compr == TIFF_G4) {
841  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
842  return AVERROR_INVALIDDATA;
843 
844  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
845  }
846 
847  bytestream2_init(&s->gb, src, size);
848  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
849 
850  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
851 
852  /* Decode JPEG-encoded DNGs with strips */
853  if (s->compr == TIFF_NEWJPEG && is_dng) {
854  if (s->strips > 1) {
855  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strip is unsupported\n");
856  return AVERROR_PATCHWELCOME;
857  }
858  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
859  return ret;
860  return 0;
861  }
862 
863  if (is_dng && stride == 0)
864  return AVERROR_INVALIDDATA;
865 
866  for (line = 0; line < lines; line++) {
867  if (src - ssrc > size) {
868  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
869  return AVERROR_INVALIDDATA;
870  }
871 
872  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
873  break;
874  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
875  switch (s->compr) {
876  case TIFF_RAW:
877  if (ssrc + size - src < width)
878  return AVERROR_INVALIDDATA;
879 
880  if (!s->fill_order) {
881  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
882  dst, 1, src, 0, width, 0);
883  } else {
884  int i;
885  for (i = 0; i < width; i++)
886  dst[i] = ff_reverse[src[i]];
887  }
888 
889  /* Color processing for DNG images with uncompressed strips (non-tiled) */
890  if (is_dng) {
891  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
892 
893  is_u16 = (s->bpp / s->bppcount > 8);
894  pixel_size_bits = (is_u16 ? 16 : 8);
895  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
896 
897  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
898  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
899  dng_blit(s,
900  dst,
901  0, // no stride, only 1 line
902  dst,
903  0, // no stride, only 1 line
904  elements,
905  1,
906  0, // single-component variation is only present in JPEG-encoded DNGs
907  is_u16);
908  }
909 
910  src += width;
911  break;
912  case TIFF_PACKBITS:
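 /* PackBits RLE: a signed code byte n is followed either by n + 1 literal
 bytes (0 <= n <= 127) or by one byte to be repeated 1 - n times
 (-127 <= n <= -1); n == -128 is a no-op. */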
913  for (pixels = 0; pixels < width;) {
914  if (ssrc + size - src < 2) {
915  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
916  return AVERROR_INVALIDDATA;
917  }
918  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
919  if (code >= 0) {
920  code++;
921  if (pixels + code > width ||
922  ssrc + size - src < code) {
923  av_log(s->avctx, AV_LOG_ERROR,
924  "Copy went out of bounds\n");
925  return AVERROR_INVALIDDATA;
926  }
927  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
928  dst, 1, src, 0, code, pixels);
929  src += code;
930  pixels += code;
931  } else if (code != -128) { // -127..-1
932  code = (-code) + 1;
933  if (pixels + code > width) {
934  av_log(s->avctx, AV_LOG_ERROR,
935  "Run went out of bounds\n");
936  return AVERROR_INVALIDDATA;
937  }
938  c = *src++;
939  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
940  dst, 0, NULL, c, code, pixels);
941  pixels += code;
942  }
943  }
944  if (s->fill_order) {
945  int i;
946  for (i = 0; i < width; i++)
947  dst[i] = ff_reverse[dst[i]];
948  }
949  break;
950  }
951  if (is_yuv) {
952  unpack_yuv(s, p, dst, strip_start + line);
953  line += s->subsampling[1] - 1;
954  } else if (p->format == AV_PIX_FMT_GRAY12) {
955  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
956  }
957  dst += stride;
958  }
959  return 0;
960 }
961 
962 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
963  const AVPacket *avpkt)
964 {
965  TiffContext *s = avctx->priv_data;
966  int tile_idx;
967  int tile_offset_offset, tile_offset;
968  int tile_byte_count_offset, tile_byte_count;
969  int tile_count_x, tile_count_y;
970  int tile_width, tile_length;
971  int has_width_leftover, has_height_leftover;
972  int tile_x = 0, tile_y = 0;
973  int pos_x = 0, pos_y = 0;
974  int ret;
975 
976  s->jpgframe->width = s->tile_width;
977  s->jpgframe->height = s->tile_length;
978 
979  s->avctx_mjpeg->width = s->tile_width;
980  s->avctx_mjpeg->height = s->tile_length;
981 
982  has_width_leftover = (s->width % s->tile_width != 0);
983  has_height_leftover = (s->height % s->tile_length != 0);
984 
985  /* Calculate tile counts (round up) */
986  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
987  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
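 /* e.g. (hypothetical sizes) a 4032x3024 image with 512x512 tiles gives
 tile_count_x = 8 and tile_count_y = 6, i.e. 48 tiles walked in row-major
 order below */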
988 
989  /* Iterate over the number of tiles */
990  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
991  tile_x = tile_idx % tile_count_x;
992  tile_y = tile_idx / tile_count_x;
993 
994  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
995  tile_width = s->width % s->tile_width;
996  else
997  tile_width = s->tile_width;
998 
999  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1000  tile_length = s->height % s->tile_length;
1001  else
1002  tile_length = s->tile_length;
1003 
1004  /* Read tile offset */
1005  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1006  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1007  tile_offset = ff_tget_long(&s->gb, s->le);
1008 
1009  /* Read tile byte size */
1010  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1011  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1012  tile_byte_count = ff_tget_long(&s->gb, s->le);
1013 
1014  /* Seek to tile data */
1015  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1016 
1017  /* Decode JPEG tile and copy it in the reference frame */
1018  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1019 
1020  if (ret < 0)
1021  return ret;
1022 
1023  /* Advance current positions */
1024  pos_x += tile_width;
1025  if (tile_x == tile_count_x - 1) { // If on the right edge
1026  pos_x = 0;
1027  pos_y += tile_length;
1028  }
1029  }
1030 
1031  /* Frame is ready to be output */
1032  frame->pict_type = AV_PICTURE_TYPE_I;
1033  frame->key_frame = 1;
1034 
1035  return avpkt->size;
1036 }
1037 
1038 static int init_image(TiffContext *s, ThreadFrame *frame)
1039 {
1040  int ret;
1041  int create_gray_palette = 0;
1042 
1043  // make sure there is no aliasing in the following switch
1044  if (s->bpp >= 100 || s->bppcount >= 10) {
1045  av_log(s->avctx, AV_LOG_ERROR,
1046  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1047  s->bpp, s->bppcount);
1048  return AVERROR_INVALIDDATA;
1049  }
1050 
1051  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
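 /* The switch key packs the parameters as
 is_bayer * 10000 + planar * 1000 + bpp * 10 + bppcount, e.g. 243 is packed
 24-bit 3-component RGB, 1483 is planar 48-bit 3-component, and 10081 is an
 8-bit single-component Bayer layout. */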
1052  case 11:
1053  if (!s->palette_is_set) {
1054  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1055  break;
1056  }
1057  case 21:
1058  case 41:
1059  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1060  if (!s->palette_is_set) {
1061  create_gray_palette = 1;
1062  }
1063  break;
1064  case 81:
1065  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1066  break;
1067  case 121:
1068  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1069  break;
1070  case 10081:
1071  switch (AV_RL32(s->pattern)) {
1072  case 0x02010100:
1073  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1074  break;
1075  case 0x00010102:
1076  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1077  break;
1078  case 0x01000201:
1079  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1080  break;
1081  case 0x01020001:
1082  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1083  break;
1084  default:
1085  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1086  AV_RL32(s->pattern));
1087  return AVERROR_PATCHWELCOME;
1088  }
1089  break;
1090  case 10101:
1091  case 10121:
1092  case 10141:
1093  case 10161:
1094  switch (AV_RL32(s->pattern)) {
1095  case 0x02010100:
1096  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1097  break;
1098  case 0x00010102:
1099  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1100  break;
1101  case 0x01000201:
1102  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1103  break;
1104  case 0x01020001:
1105  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1106  break;
1107  default:
1108  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1109  AV_RL32(s->pattern));
1110  return AVERROR_PATCHWELCOME;
1111  }
1112  break;
1113  case 243:
1114  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1115  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1116  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1117  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1118  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1119  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1120  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1121  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1122  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1123  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1124  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1125  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1126  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1127  } else {
1128  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1129  return AVERROR_PATCHWELCOME;
1130  }
1131  } else
1132  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1133  break;
1134  case 161:
1135  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1136  break;
1137  case 162:
1138  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1139  break;
1140  case 322:
1141  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1142  break;
1143  case 324:
1144  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1145  break;
1146  case 405:
1147  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1148  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1149  else {
1150  av_log(s->avctx, AV_LOG_ERROR,
1151  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1152  return AVERROR_PATCHWELCOME;
1153  }
1154  break;
1155  case 483:
1156  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1157  break;
1158  case 644:
1159  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1160  break;
1161  case 1243:
1162  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1163  break;
1164  case 1324:
1165  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1166  break;
1167  case 1483:
1168  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1169  break;
1170  case 1644:
1171  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1172  break;
1173  default:
1174  av_log(s->avctx, AV_LOG_ERROR,
1175  "This format is not supported (bpp=%d, bppcount=%d)\n",
1176  s->bpp, s->bppcount);
1177  return AVERROR_INVALIDDATA;
1178  }
1179 
1180  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1181  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1182  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1183  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1184  desc->nb_components < 3) {
1185  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1186  return AVERROR_INVALIDDATA;
1187  }
1188  }
1189 
1190  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1191  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1192  if (ret < 0)
1193  return ret;
1194  }
1195  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1196  return ret;
1197  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1198  if (!create_gray_palette)
1199  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1200  else {
1201  /* make default grayscale pal */
1202  int i;
1203  uint32_t *pal = (uint32_t *)frame->f->data[1];
1204  for (i = 0; i < 1<<s->bpp; i++)
1205  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1206  }
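 /* e.g. for bpp = 2 the loop above yields the four opaque gray entries
 0xFF000000, 0xFF555555, 0xFFAAAAAA and 0xFFFFFFFF */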
1207  }
1208  return 0;
1209 }
1210 
1211 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1212 {
1213  int offset = tag == TIFF_YRES ? 2 : 0;
1214  s->res[offset++] = num;
1215  s->res[offset] = den;
1216  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1217  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1218  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1219  if (num > INT64_MAX || den > INT64_MAX) {
1220  num = num >> 1;
1221  den = den >> 1;
1222  }
1223  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1224  num, den, INT32_MAX);
1225  if (!s->avctx->sample_aspect_ratio.den)
1226  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1227  }
1228 }
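/* Example for set_sar() above (hypothetical resolutions): XRes = 300/1 and
 * YRes = 150/1 give res[] = {300, 1, 150, 1}, so the sample aspect ratio
 * reduces to (150*1)/(300*1) = 1:2, i.e. each pixel is half as wide as it is
 * tall. */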
1229 
1230 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1231 {
1232  AVFrameSideData *sd;
1233  GetByteContext gb_temp;
1234  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1235  int i, start;
1236  int pos;
1237  int ret;
1238  double *dp;
1239 
1240  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1241  if (ret < 0) {
1242  goto end;
1243  }
1244  if (tag <= s->last_tag)
1245  return AVERROR_INVALIDDATA;
1246 
1247  // Do not let TIFF_STRIP_SIZE advance last_tag: some files store it out of order relative to TIFF_STRIP_OFFS
1248  if (tag != TIFF_STRIP_SIZE)
1249  s->last_tag = tag;
1250 
1251  off = bytestream2_tell(&s->gb);
1252  if (count == 1) {
1253  switch (type) {
1254  case TIFF_BYTE:
1255  case TIFF_SHORT:
1256  case TIFF_LONG:
1257  value = ff_tget(&s->gb, type, s->le);
1258  break;
1259  case TIFF_RATIONAL:
1260  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1261  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1262  if (!value2) {
1263  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1264  return AVERROR_INVALIDDATA;
1265  }
1266 
1267  break;
1268  case TIFF_STRING:
1269  if (count <= 4) {
1270  break;
1271  }
1272  default:
1273  value = UINT_MAX;
1274  }
1275  }
1276 
1277  switch (tag) {
1278  case TIFF_SUBFILE:
1279  s->is_thumbnail = (value != 0);
1280  break;
1281  case TIFF_WIDTH:
1282  s->width = value;
1283  break;
1284  case TIFF_HEIGHT:
1285  s->height = value;
1286  break;
1287  case TIFF_BPP:
1288  if (count > 5 || count <= 0) {
1289  av_log(s->avctx, AV_LOG_ERROR,
1290  "This format is not supported (bpp=%d, %d components)\n",
1291  value, count);
1292  return AVERROR_INVALIDDATA;
1293  }
1294  s->bppcount = count;
1295  if (count == 1)
1296  s->bpp = value;
1297  else {
1298  switch (type) {
1299  case TIFF_BYTE:
1300  case TIFF_SHORT:
1301  case TIFF_LONG:
1302  s->bpp = 0;
1303  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1304  return AVERROR_INVALIDDATA;
1305  for (i = 0; i < count; i++)
1306  s->bpp += ff_tget(&s->gb, type, s->le);
1307  break;
1308  default:
1309  s->bpp = -1;
1310  }
1311  }
1312  break;
1313  case TIFF_SAMPLES_PER_PIXEL:
1314  if (count != 1) {
1315  av_log(s->avctx, AV_LOG_ERROR,
1316  "Samples per pixel requires a single value, many provided\n");
1317  return AVERROR_INVALIDDATA;
1318  }
1319  if (value > 5 || value <= 0) {
1320  av_log(s->avctx, AV_LOG_ERROR,
1321  "Invalid samples per pixel %d\n", value);
1322  return AVERROR_INVALIDDATA;
1323  }
1324  if (s->bppcount == 1)
1325  s->bpp *= value;
1326  s->bppcount = value;
1327  break;
1328  case TIFF_COMPR:
1329  s->compr = value;
1330  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1331  s->predictor = 0;
1332  switch (s->compr) {
1333  case TIFF_RAW:
1334  case TIFF_PACKBITS:
1335  case TIFF_LZW:
1336  case TIFF_CCITT_RLE:
1337  break;
1338  case TIFF_G3:
1339  case TIFF_G4:
1340  s->fax_opts = 0;
1341  break;
1342  case TIFF_DEFLATE:
1343  case TIFF_ADOBE_DEFLATE:
1344 #if CONFIG_ZLIB
1345  break;
1346 #else
1347  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1348  return AVERROR(ENOSYS);
1349 #endif
1350  case TIFF_JPEG:
1351  case TIFF_NEWJPEG:
1352  s->is_jpeg = 1;
1353  break;
1354  case TIFF_LZMA:
1355 #if CONFIG_LZMA
1356  break;
1357 #else
1358  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1359  return AVERROR(ENOSYS);
1360 #endif
1361  default:
1362  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1363  s->compr);
1364  return AVERROR_INVALIDDATA;
1365  }
1366  break;
1367  case TIFF_ROWSPERSTRIP:
1368  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1369  value = s->height;
1370  s->rps = FFMIN(value, s->height);
1371  break;
1372  case TIFF_STRIP_OFFS:
1373  if (count == 1) {
1374  if (value > INT_MAX) {
1375  av_log(s->avctx, AV_LOG_ERROR,
1376  "strippos %u too large\n", value);
1377  return AVERROR_INVALIDDATA;
1378  }
1379  s->strippos = 0;
1380  s->stripoff = value;
1381  } else
1382  s->strippos = off;
1383  s->strips = count;
1384  if (s->strips == 1)
1385  s->rps = s->height;
1386  s->sot = type;
1387  break;
1388  case TIFF_STRIP_SIZE:
1389  if (count == 1) {
1390  if (value > INT_MAX) {
1391  av_log(s->avctx, AV_LOG_ERROR,
1392  "stripsize %u too large\n", value);
1393  return AVERROR_INVALIDDATA;
1394  }
1395  s->stripsizesoff = 0;
1396  s->stripsize = value;
1397  s->strips = 1;
1398  } else {
1399  s->stripsizesoff = off;
1400  }
1401  s->strips = count;
1402  s->sstype = type;
1403  break;
1404  case TIFF_XRES:
1405  case TIFF_YRES:
1406  set_sar(s, tag, value, value2);
1407  break;
1408  case TIFF_TILE_OFFSETS:
1409  s->tile_offsets_offset = off;
1410  s->tile_count = count;
1411  s->is_tiled = 1;
1412  break;
1413  case TIFF_TILE_BYTE_COUNTS:
1414  s->tile_byte_counts_offset = off;
1415  break;
1416  case TIFF_TILE_LENGTH:
1417  s->tile_length = value;
1418  break;
1419  case TIFF_TILE_WIDTH:
1420  s->tile_width = value;
1421  break;
1422  case TIFF_PREDICTOR:
1423  s->predictor = value;
1424  break;
1425  case TIFF_SUB_IFDS:
1426  if (count == 1)
1427  s->sub_ifd = value;
1428  else if (count > 1)
1429  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1430  break;
1431  case DNG_LINEARIZATION_TABLE:
1432  if (count > FF_ARRAY_ELEMS(s->dng_lut))
1433  return AVERROR_INVALIDDATA;
1434  for (int i = 0; i < count; i++)
1435  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1436  break;
1437  case DNG_BLACK_LEVEL:
1438  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1439  if (type == TIFF_RATIONAL) {
1440  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1441  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1442  if (!value2) {
1443  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1444  return AVERROR_INVALIDDATA;
1445  }
1446 
1447  s->black_level = value / value2;
1448  } else
1449  s->black_level = ff_tget(&s->gb, type, s->le);
1450  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1451  } else {
1452  s->black_level = value / value2;
1453  }
1454  break;
1455  case DNG_WHITE_LEVEL:
1456  s->white_level = value;
1457  break;
1458  case TIFF_CFA_PATTERN_DIM:
1459  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1460  ff_tget(&s->gb, type, s->le) != 2)) {
1461  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1462  return AVERROR_INVALIDDATA;
1463  }
1464  break;
1465  case TIFF_CFA_PATTERN:
1466  s->is_bayer = 1;
1467  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1468  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1469  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1470  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1471  break;
1472  case TIFF_PHOTOMETRIC:
1473  switch (value) {
1474  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1475  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1476  case TIFF_PHOTOMETRIC_RGB:
1477  case TIFF_PHOTOMETRIC_PALETTE:
1478  case TIFF_PHOTOMETRIC_SEPARATED:
1479  case TIFF_PHOTOMETRIC_YCBCR:
1480  case TIFF_PHOTOMETRIC_CFA:
1481  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1482  s->photometric = value;
1483  break;
1484  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1485  case TIFF_PHOTOMETRIC_CIE_LAB:
1486  case TIFF_PHOTOMETRIC_ICC_LAB:
1487  case TIFF_PHOTOMETRIC_ITU_LAB:
1488  case TIFF_PHOTOMETRIC_LOG_L:
1489  case TIFF_PHOTOMETRIC_LOG_LUV:
1490  avpriv_report_missing_feature(s->avctx,
1491  "PhotometricInterpretation 0x%04X",
1492  value);
1493  return AVERROR_PATCHWELCOME;
1494  default:
1495  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1496  "unknown\n", value);
1497  return AVERROR_INVALIDDATA;
1498  }
1499  break;
1500  case TIFF_FILL_ORDER:
1501  if (value < 1 || value > 2) {
1502  av_log(s->avctx, AV_LOG_ERROR,
1503  "Unknown FillOrder value %d, trying default one\n", value);
1504  value = 1;
1505  }
1506  s->fill_order = value - 1;
1507  break;
1508  case TIFF_PAL: {
1509  GetByteContext pal_gb[3];
1510  off = type_sizes[type];
1511  if (count / 3 > 256 ||
1512  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1513  return AVERROR_INVALIDDATA;
1514 
1515  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1516  bytestream2_skip(&pal_gb[1], count / 3 * off);
1517  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1518 
1519  off = (type_sizes[type] - 1) << 3;
1520  if (off > 31U) {
1521  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1522  return AVERROR_INVALIDDATA;
1523  }
1524 
1525  for (i = 0; i < count / 3; i++) {
1526  uint32_t p = 0xFF000000;
1527  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1528  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1529  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1530  s->palette[i] = p;
1531  }
1532  s->palette_is_set = 1;
1533  break;
1534  }
1535  case TIFF_PLANAR:
1536  s->planar = value == 2;
1537  break;
1538  case TIFF_YCBCR_SUBSAMPLING:
1539  if (count != 2) {
1540  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1541  return AVERROR_INVALIDDATA;
1542  }
1543  for (i = 0; i < count; i++) {
1544  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1545  if (s->subsampling[i] <= 0) {
1546  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1547  s->subsampling[i] = 1;
1548  return AVERROR_INVALIDDATA;
1549  }
1550  }
1551  break;
1552  case TIFF_T4OPTIONS:
1553  if (s->compr == TIFF_G3)
1554  s->fax_opts = value;
1555  break;
1556  case TIFF_T6OPTIONS:
1557  if (s->compr == TIFF_G4)
1558  s->fax_opts = value;
1559  break;
1560 #define ADD_METADATA(count, name, sep)\
1561  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1562  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1563  goto end;\
1564  }
1565  case TIFF_MODEL_PIXEL_SCALE:
1566  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1567  break;
1568  case TIFF_MODEL_TRANSFORMATION:
1569  ADD_METADATA(count, "ModelTransformationTag", NULL);
1570  break;
1571  case TIFF_MODEL_TIEPOINT:
1572  ADD_METADATA(count, "ModelTiepointTag", NULL);
1573  break;
1574  case TIFF_GEO_KEY_DIRECTORY:
1575  if (s->geotag_count) {
1576  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1577  return AVERROR_INVALIDDATA;
1578  }
1579  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1580  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1581  s->geotag_count = ff_tget_short(&s->gb, s->le);
1582  if (s->geotag_count > count / 4 - 1) {
1583  s->geotag_count = count / 4 - 1;
1584  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1585  }
1586  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1587  || s->geotag_count == 0) {
1588  s->geotag_count = 0;
1589  return -1;
1590  }
1591  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1592  if (!s->geotags) {
1593  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1594  s->geotag_count = 0;
1595  goto end;
1596  }
1597  for (i = 0; i < s->geotag_count; i++) {
1598  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1599  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1600  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1601 
1602  if (!s->geotags[i].type)
1603  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1604  else
1605  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1606  }
1607  break;
1608  case TIFF_GEO_DOUBLE_PARAMS:
1609  if (count >= INT_MAX / sizeof(int64_t))
1610  return AVERROR_INVALIDDATA;
1611  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1612  return AVERROR_INVALIDDATA;
1613  dp = av_malloc_array(count, sizeof(double));
1614  if (!dp) {
1615  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1616  goto end;
1617  }
1618  for (i = 0; i < count; i++)
1619  dp[i] = ff_tget_double(&s->gb, s->le);
1620  for (i = 0; i < s->geotag_count; i++) {
1621  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1622  if (s->geotags[i].count == 0
1623  || s->geotags[i].offset + s->geotags[i].count > count) {
1624  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1625  } else if (s->geotags[i].val) {
1626  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1627  } else {
1628  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1629  if (!ap) {
1630  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1631  av_freep(&dp);
1632  return AVERROR(ENOMEM);
1633  }
1634  s->geotags[i].val = ap;
1635  }
1636  }
1637  }
1638  av_freep(&dp);
1639  break;
1640  case TIFF_GEO_ASCII_PARAMS:
1641  pos = bytestream2_tell(&s->gb);
1642  for (i = 0; i < s->geotag_count; i++) {
1643  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1644  if (s->geotags[i].count == 0
1645  || s->geotags[i].offset + s->geotags[i].count > count) {
1646  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1647  } else {
1648  char *ap;
1649 
1650  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1651  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1652  return AVERROR_INVALIDDATA;
1653  if (s->geotags[i].val)
1654  return AVERROR_INVALIDDATA;
1655  ap = av_malloc(s->geotags[i].count);
1656  if (!ap) {
1657  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1658  return AVERROR(ENOMEM);
1659  }
1660  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1661  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1662  s->geotags[i].val = ap;
1663  }
1664  }
1665  }
1666  break;
1667  case TIFF_ICC_PROFILE:
1668  gb_temp = s->gb;
1669  bytestream2_seek(&gb_temp, off, SEEK_SET);
1670 
1671  if (bytestream2_get_bytes_left(&gb_temp) < count)
1672  return AVERROR_INVALIDDATA;
1673 
1674  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
1675  if (!sd)
1676  return AVERROR(ENOMEM);
1677 
1678  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1679  break;
1680  case TIFF_ARTIST:
1681  ADD_METADATA(count, "artist", NULL);
1682  break;
1683  case TIFF_COPYRIGHT:
1684  ADD_METADATA(count, "copyright", NULL);
1685  break;
1686  case TIFF_DATE:
1687  ADD_METADATA(count, "date", NULL);
1688  break;
1689  case TIFF_DOCUMENT_NAME:
1690  ADD_METADATA(count, "document_name", NULL);
1691  break;
1692  case TIFF_HOST_COMPUTER:
1693  ADD_METADATA(count, "computer", NULL);
1694  break;
1695  case TIFF_IMAGE_DESCRIPTION:
1696  ADD_METADATA(count, "description", NULL);
1697  break;
1698  case TIFF_MAKE:
1699  ADD_METADATA(count, "make", NULL);
1700  break;
1701  case TIFF_MODEL:
1702  ADD_METADATA(count, "model", NULL);
1703  break;
1704  case TIFF_PAGE_NAME:
1705  ADD_METADATA(count, "page_name", NULL);
1706  break;
1707  case TIFF_PAGE_NUMBER:
1708  ADD_METADATA(count, "page_number", " / ");
1709  // need to seek back to re-read the page number
1710  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1711  // read the page number
1712  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1713  // get back to where we were before the previous seek
1714  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1715  break;
1716  case TIFF_SOFTWARE_NAME:
1717  ADD_METADATA(count, "software", NULL);
1718  break;
1719  case DNG_VERSION:
1720  if (count == 4) {
1721  unsigned int ver[4];
1722  ver[0] = ff_tget(&s->gb, type, s->le);
1723  ver[1] = ff_tget(&s->gb, type, s->le);
1724  ver[2] = ff_tget(&s->gb, type, s->le);
1725  ver[3] = ff_tget(&s->gb, type, s->le);
1726 
1727  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1728  ver[0], ver[1], ver[2], ver[3]);
1729 
1730  tiff_set_type(s, TIFF_TYPE_DNG);
1731  }
1732  break;
1733  case CINEMADNG_TIME_CODES:
1734  case CINEMADNG_FRAME_RATE:
1735  case CINEMADNG_T_STOP:
1736  case CINEMADNG_REEL_NAME:
1737  case CINEMADNG_CAMERA_LABEL:
1738  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1739  break;
1740  default:
1741  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1742  av_log(s->avctx, AV_LOG_ERROR,
1743  "Unknown or unsupported tag %d/0x%0X\n",
1744  tag, tag);
1745  return AVERROR_INVALIDDATA;
1746  }
1747  }
1748 end:
1749  if (s->bpp > 64U) {
1750  av_log(s->avctx, AV_LOG_ERROR,
1751  "This format is not supported (bpp=%d, %d components)\n",
1752  s->bpp, count);
1753  s->bpp = 0;
1754  return AVERROR_INVALIDDATA;
1755  }
1756  bytestream2_seek(&s->gb, start, SEEK_SET);
1757  return 0;
1758 }
1759 
1760 static int decode_frame(AVCodecContext *avctx,
1761  void *data, int *got_frame, AVPacket *avpkt)
1762 {
1763  TiffContext *const s = avctx->priv_data;
1764  AVFrame *const p = data;
1765  ThreadFrame frame = { .f = data };
1766  unsigned off, last_off;
1767  int le, ret, plane, planes;
1768  int i, j, entries, stride;
1769  unsigned soff, ssize;
1770  uint8_t *dst;
1771  GetByteContext stripsizes;
1772  GetByteContext stripdata;
1773  int retry_for_subifd, retry_for_page;
1774  int is_dng;
1775  int has_tile_bits, has_strip_bits;
1776 
1777  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1778 
1779  // parse image header
1780  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1781  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1782  return ret;
1783  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1784  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1785  return AVERROR_INVALIDDATA;
1786  }
1787  s->le = le;
1788  // TIFF_BPP is not a required tag and defaults to 1
1789 
1790  s->tiff_type = TIFF_TYPE_TIFF;
1791 again:
1792  s->is_thumbnail = 0;
1793  s->bppcount = s->bpp = 1;
1794  s->photometric = TIFF_PHOTOMETRIC_NONE;
1795  s->compr = TIFF_RAW;
1796  s->fill_order = 0;
1797  s->white_level = 0;
1798  s->is_bayer = 0;
1799  s->is_tiled = 0;
1800  s->is_jpeg = 0;
1801  s->cur_page = 0;
1802  s->last_tag = 0;
1803 
1804  for (i = 0; i < 65536; i++)
1805  s->dng_lut[i] = i;
1806 
1807  free_geotags(s);
1808 
1809  // Reset these offsets so we can tell if they were set this frame
1810  s->stripsizesoff = s->strippos = 0;
1811  /* parse image file directory */
1812  bytestream2_seek(&s->gb, off, SEEK_SET);
1813  entries = ff_tget_short(&s->gb, le);
1814  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1815  return AVERROR_INVALIDDATA;
1816  for (i = 0; i < entries; i++) {
1817  if ((ret = tiff_decode_tag(s, p)) < 0)
1818  return ret;
1819  }
1820 
1821  if (s->get_thumbnail && !s->is_thumbnail) {
1822  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1823  return AVERROR_EOF;
1824  }
1825 
1826  /** whether we should process this IFD's SubIFD */
1827  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1828  /** whether we should process this multi-page IFD's next page */
1829  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1830 
1831  last_off = off;
1832  if (retry_for_page) {
1833  // set offset to the next IFD
1834  off = ff_tget_long(&s->gb, le);
1835  } else if (retry_for_subifd) {
1836  // set offset to the SubIFD
1837  off = s->sub_ifd;
1838  }
1839 
1840  if (retry_for_subifd || retry_for_page) {
1841  if (!off) {
1842  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1843  return AVERROR_INVALIDDATA;
1844  }
1845  if (off <= last_off) {
1846  avpriv_request_sample(s->avctx, "non increasing IFD offset");
1847  return AVERROR_INVALIDDATA;
1848  }
1849  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1850  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1851  return AVERROR_INVALIDDATA;
1852  }
1853  s->sub_ifd = 0;
1854  goto again;
1855  }
1856 
1857  /* At this point we've decided on which (Sub)IFD to process */
1858 
1859  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1860 
1861  for (i = 0; i<s->geotag_count; i++) {
1862  const char *keyname = get_geokey_name(s->geotags[i].key);
1863  if (!keyname) {
1864  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1865  continue;
1866  }
1867  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1868  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1869  continue;
1870  }
1871  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1872  if (ret<0) {
1873  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1874  return ret;
1875  }
1876  }
1877 
1878  if (is_dng) {
1879  int bps;
1880 
1881  if (s->bpp % s->bppcount)
1882  return AVERROR_INVALIDDATA;
1883  bps = s->bpp / s->bppcount;
1884  if (bps < 8 || bps > 32)
1885  return AVERROR_INVALIDDATA;
1886 
1887  if (s->white_level == 0)
1888  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
1889 
1890  if (s->white_level <= s->black_level) {
1891  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1892  s->black_level, s->white_level);
1893  return AVERROR_INVALIDDATA;
1894  }
1895 
1896  if (s->planar)
1897  return AVERROR_PATCHWELCOME;
1898  }
1899 
1900  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1901  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1902  return AVERROR_INVALIDDATA;
1903  }
1904 
1905  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
1906  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
1907 
1908  if (has_tile_bits && has_strip_bits) {
1909  int tiled_dng = s->is_tiled && is_dng;
1910  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
1911  if (!tiled_dng)
1912  return AVERROR_INVALIDDATA;
1913  }
1914 
1915  /* now we have the data and may start decoding */
1916  if ((ret = init_image(s, &frame)) < 0)
1917  return ret;
1918 
1919  if (!s->is_tiled || has_strip_bits) {
1920  if (s->strips == 1 && !s->stripsize) {
1921  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1922  s->stripsize = avpkt->size - s->stripoff;
1923  }
1924 
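 /* When StripByteCounts / StripOffsets are stored as arrays in the packet, set up byte readers at their offsets; otherwise the single stripsize/stripoff values are used below. */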
1925  if (s->stripsizesoff) {
1926  if (s->stripsizesoff >= (unsigned)avpkt->size)
1927  return AVERROR_INVALIDDATA;
1928  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1929  avpkt->size - s->stripsizesoff);
1930  }
1931  if (s->strippos) {
1932  if (s->strippos >= (unsigned)avpkt->size)
1933  return AVERROR_INVALIDDATA;
1934  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1935  avpkt->size - s->strippos);
1936  }
1937 
1938  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1939  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1940  return AVERROR_INVALIDDATA;
1941  }
1942  }
1943 
1944  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
1945  s->photometric == TIFF_PHOTOMETRIC_CFA) {
1946  p->color_trc = AVCOL_TRC_LINEAR;
1947  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1948  p->color_trc = AVCOL_TRC_GAMMA22;
1949  }
1950 
1951  /* Handle DNG images with JPEG-compressed tiles */
1952 
1953  if (is_dng && s->is_tiled) {
1954  if (!s->is_jpeg) {
1955  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1956  return AVERROR_PATCHWELCOME;
1957  } else if (!s->is_bayer) {
1958  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1959  return AVERROR_PATCHWELCOME;
1960  } else {
1961  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1962  *got_frame = 1;
1963  return ret;
1964  }
1965  }
1966 
1967  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1968 
1969  planes = s->planar ? s->bppcount : 1;
1970  for (plane = 0; plane < planes; plane++) {
1971  uint8_t *five_planes = NULL;
1972  int remaining = avpkt->size;
1973  int decoded_height;
1974  stride = p->linesize[plane];
1975  dst = p->data[plane];
1976  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1977  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1978  stride = stride * 5 / 4;
1979  five_planes =
1980  dst = av_malloc(stride * s->height);
1981  if (!dst)
1982  return AVERROR(ENOMEM);
1983  }
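 /* Decode strip by strip: each strip covers rps rows; its size and offset come from the arrays read above or from the single-strip values. */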
1984  for (i = 0; i < s->height; i += s->rps) {
1985  if (i)
1986  dst += s->rps * stride;
1987  if (s->stripsizesoff)
1988  ssize = ff_tget(&stripsizes, s->sstype, le);
1989  else
1990  ssize = s->stripsize;
1991 
1992  if (s->strippos)
1993  soff = ff_tget(&stripdata, s->sot, le);
1994  else
1995  soff = s->stripoff;
1996 
1997  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
1998  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
1999  av_freep(&five_planes);
2000  return AVERROR_INVALIDDATA;
2001  }
2002  remaining -= ssize;
2003  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2004  FFMIN(s->rps, s->height - i))) < 0) {
2005  if (avctx->err_recognition & AV_EF_EXPLODE) {
2006  av_freep(&five_planes);
2007  return ret;
2008  }
2009  break;
2010  }
2011  }
2012  decoded_height = FFMIN(i, s->height);
2013 
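 /* TIFF predictor 2 (horizontal differencing): each sample is stored as the difference from the sample to its left, so decoding adds the previous sample back, e.g. stored samples 100, 5, 253 decode to 100, 105, 102 (mod 256). */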
2014  if (s->predictor == 2) {
2015  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2016  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2017  return AVERROR_PATCHWELCOME;
2018  }
2019  dst = five_planes ? five_planes : p->data[plane];
2020  soff = s->bpp >> 3;
2021  if (s->planar)
2022  soff = FFMAX(soff / s->bppcount, 1);
2023  ssize = s->width * soff;
2024  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2025  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2026  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2027  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2028  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2029  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2030  for (i = 0; i < decoded_height; i++) {
2031  for (j = soff; j < ssize; j += 2)
2032  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2033  dst += stride;
2034  }
2035  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2036  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2037  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2038  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2039  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2040  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2041  for (i = 0; i < decoded_height; i++) {
2042  for (j = soff; j < ssize; j += 2)
2043  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2044  dst += stride;
2045  }
2046  } else {
2047  for (i = 0; i < decoded_height; i++) {
2048  for (j = soff; j < ssize; j++)
2049  dst[j] += dst[j - soff];
2050  dst += stride;
2051  }
2052  }
2053  }
2054 
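 /* WhiteIsZero: invert every sample so the output follows the usual black-is-zero convention; for PAL8 the maximum code is (1 << bpp) - 1 instead of 255. */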
2055  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2056  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2057  dst = p->data[plane];
2058  for (i = 0; i < s->height; i++) {
2059  for (j = 0; j < stride; j++)
2060  dst[j] = c - dst[j];
2061  dst += stride;
2062  }
2063  }
2064 
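 /* CMYK (PHOTOMETRIC_SEPARATED) to RGB: R = (255 - C) * (255 - K) / 255, with the division by 255 approximated by (* 257 >> 16); the RGBA64BE branch below does the same at 16-bit depth using (* 65537 >> 32). */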
2065  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2066  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2067  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2068  uint8_t *src = five_planes ? five_planes : p->data[plane];
2069  dst = p->data[plane];
2070  for (i = 0; i < s->height; i++) {
2071  for (j = 0; j < s->width; j++) {
2072  int k = 255 - src[x * j + 3];
2073  int r = (255 - src[x * j ]) * k;
2074  int g = (255 - src[x * j + 1]) * k;
2075  int b = (255 - src[x * j + 2]) * k;
2076  dst[4 * j ] = r * 257 >> 16;
2077  dst[4 * j + 1] = g * 257 >> 16;
2078  dst[4 * j + 2] = b * 257 >> 16;
2079  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2080  }
2081  src += stride;
2082  dst += p->linesize[plane];
2083  }
2084  av_freep(&five_planes);
2085  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2086  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2087  dst = p->data[plane];
2088  for (i = 0; i < s->height; i++) {
2089  for (j = 0; j < s->width; j++) {
2090  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2091  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2092  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2093  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2094  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2095  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2096  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2097  AV_WB16(dst + 8 * j + 6, 65535);
2098  }
2099  dst += p->linesize[plane];
2100  }
2101  }
2102  }
2103 
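 /* Planar RGB was decoded into planes in R,G,B order; AV_PIX_FMT_GBRP* expects G,B,R, so rotate the first three plane pointers and line sizes. */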
2104  if (s->planar && s->bppcount > 2) {
2105  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2106  FFSWAP(int, p->linesize[0], p->linesize[2]);
2107  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2108  FFSWAP(int, p->linesize[0], p->linesize[1]);
2109  }
2110 
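 /* Non-DNG bayer input with a custom white level: rescale raw samples so white_level maps to 65535, filling the full 16-bit range. */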
2111  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2112  uint16_t *dst = (uint16_t *)p->data[0];
2113  for (i = 0; i < s->height; i++) {
2114  for (j = 0; j < s->width; j++)
2115  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2116  dst += stride / 2;
2117  }
2118  }
2119 
2120  *got_frame = 1;
2121 
2122  return avpkt->size;
2123 }
2124 
2125 static av_cold int tiff_init(AVCodecContext *avctx)
2126 {
2127  TiffContext *s = avctx->priv_data;
2128  const AVCodec *codec;
2129  int ret;
2130 
2131  s->width = 0;
2132  s->height = 0;
2133  s->subsampling[0] =
2134  s->subsampling[1] = 1;
2135  s->avctx = avctx;
2136  ff_lzw_decode_open(&s->lzw);
2137  if (!s->lzw)
2138  return AVERROR(ENOMEM);
2139  ff_ccitt_unpack_init();
2140 
2141  /* Allocate JPEG frame */
2142  s->jpgframe = av_frame_alloc();
2143  s->jpkt = av_packet_alloc();
2144  if (!s->jpgframe || !s->jpkt)
2145  return AVERROR(ENOMEM);
2146 
2147  /* Prepare everything needed for JPEG decoding */
2148  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2149  if (!codec)
2150  return AVERROR_BUG;
2151  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2152  if (!s->avctx_mjpeg)
2153  return AVERROR(ENOMEM);
2154  s->avctx_mjpeg->flags = avctx->flags;
2155  s->avctx_mjpeg->flags2 = avctx->flags2;
2156  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2157  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2158  ret = avcodec_open2(s->avctx_mjpeg, codec, NULL);
2159  if (ret < 0) {
2160  return ret;
2161  }
2162 
2163  return 0;
2164 }
2165 
2166 static av_cold int tiff_end(AVCodecContext *avctx)
2167 {
2168  TiffContext *const s = avctx->priv_data;
2169 
2170  free_geotags(s);
2171 
2172  ff_lzw_decode_close(&s->lzw);
2173  av_freep(&s->deinvert_buf);
2174  s->deinvert_buf_size = 0;
2175  av_freep(&s->yuv_line);
2176  s->yuv_line_size = 0;
2177  av_frame_free(&s->jpgframe);
2178  av_packet_free(&s->jpkt);
2179  avcodec_free_context(&s->avctx_mjpeg);
2180  return 0;
2181 }
2182 
2183 #define OFFSET(x) offsetof(TiffContext, x)
2184 static const AVOption tiff_options[] = {
2185  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2186  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2187  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2188  { NULL },
2189 };
2190 
2191 static const AVClass tiff_decoder_class = {
2192  .class_name = "TIFF decoder",
2193  .item_name = av_default_item_name,
2194  .option = tiff_options,
2195  .version = LIBAVUTIL_VERSION_INT,
2196 };
2197 
2198 AVCodec ff_tiff_decoder = {
2199  .name = "tiff",
2200  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2201  .type = AVMEDIA_TYPE_VIDEO,
2202  .id = AV_CODEC_ID_TIFF,
2203  .priv_data_size = sizeof(TiffContext),
2204  .init = tiff_init,
2205  .close = tiff_end,
2206  .decode = decode_frame,
2207  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2208  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
2209  .priv_class = &tiff_decoder_class,
2210 };