32 #define LZMA_API_STATIC
132 #define RET_GEOKEY(TYPE, array, element)\
133     if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134         key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
135         return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
180 #define RET_GEOKEY_VAL(TYPE, array)\
181     if (val >= TIFF_##TYPE##_OFFSET &&\
182         val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
183         return av_strdup(tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
235     snprintf(ap, 14, "Unknown-%d", val);
243     uint64_t component_len;
244     if (!sep) sep = ", ";
245     component_len = 24LL + strlen(sep);
246     if (count >= (INT_MAX - 1)/component_len)
248     ap = av_malloc(component_len * count + 1);
253     for (i = 0; i < count; i++) {
254         unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255         if (l >= component_len) {
261     ap0[strlen(ap0) - strlen(sep)] = '\0';
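The doubles2str() fragment above is heavily elided; as a point of reference, here is a self-contained sketch of the same join-doubles-into-one-string pattern (the function and variable names are illustrative, not the decoder's own):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join `count` doubles into one separator-delimited string.
 * Returns a malloc'ed string the caller must free, or NULL on error. */
static char *join_doubles(const double *dp, int count, const char *sep)
{
    if (!sep)
        sep = ", ";
    size_t component_len = 24 + strlen(sep);   /* 24 chars cover "%.15g" incl. sign/exponent */
    if (count <= 0 || (size_t)count >= (INT_MAX - 1) / component_len)
        return NULL;
    char *buf = malloc(component_len * count + 1);
    if (!buf)
        return NULL;
    char *p = buf;
    for (int i = 0; i < count; i++) {
        int l = snprintf(p, component_len, "%.15g%s", dp[i], sep);
        if (l < 0 || (size_t)l >= component_len) {  /* truncated: bail out as the original does */
            free(buf);
            return NULL;
        }
        p += l;
    }
    buf[strlen(buf) - strlen(sep)] = '\0';     /* drop the trailing separator */
    return buf;
}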
294 value_norm = (float)value * scale_factor;
311     int is_single_comp, int is_u16)
318     if (is_single_comp) {
324         for (line = 0; line < height / 2; line++) {
325             uint16_t *dst_u16 = (uint16_t *)dst;
326             uint16_t *src_u16 = (uint16_t *)src;
329             for (col = 0; col < width; col++)
333             dst += dst_stride * sizeof(uint16_t);
334             dst_u16 = (uint16_t *)dst;
337             for (col = 0; col < width; col++)
340             dst += dst_stride * sizeof(uint16_t);
341             src += src_stride * sizeof(uint16_t);
347         for (line = 0; line < height; line++) {
348             uint16_t *dst_u16 = (uint16_t *)dst;
349             uint16_t *src_u16 = (uint16_t *)src;
351             for (col = 0; col < width; col++)
354             dst += dst_stride * sizeof(uint16_t);
355             src += src_stride * sizeof(uint16_t);
358         for (line = 0; line < height; line++) {
362             for (col = 0; col < width; col++)
379     while (--width >= 0) {
380         dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
381         dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
382         dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
383         dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
384         dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
385         dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
386         dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
387         dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
391     while (--width >= 0) {
392         dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
393         dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
394         dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
395         dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
399     while (--width >= 0) {
400         dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
401         dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
407     uint16_t *dst16 = (uint16_t *)dst;
413     for (int i = 0; i < s->width; i++) {
420     memcpy(dst + offset, src, width);
422     memset(dst + offset, c, width);
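As a worked illustration of the 4 bpp branch above, one packed byte expands to two one-byte samples, high nibble first (the input value is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t packed = 0xA3;          /* two 4-bit samples, high nibble first   */
    uint8_t hi = packed >> 4;       /* 0x0A -> dst[(width+offset)*2+0] above  */
    uint8_t lo = packed & 0xF;      /* 0x03 -> dst[(width+offset)*2+1] above  */
    printf("%#x %#x\n", (unsigned)hi, (unsigned)lo);
    return 0;
}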
434     for (i = 0; i < size; i++)
444     uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
448     for (int i = 0; i < s->width; i++) {
461     for (i = 0; i < w; i++) {
470     for (i = 0; i < w; i++) {
485     z_stream zstream = { 0 };
488     zstream.next_in = src;
489     zstream.avail_in = size;
490     zstream.next_out = dst;
491     zstream.avail_out = *len;
492     zret = inflateInit(&zstream);
497     zret = inflate(&zstream, Z_SYNC_FLUSH);
498     inflateEnd(&zstream);
499     *len = zstream.total_out;
500     return zret == Z_STREAM_END ? Z_OK : zret;
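For reference, the same single-shot zlib inflate pattern written out as a self-contained helper, assuming the whole compressed strip and the whole output fit in the given buffers (the helper name is illustrative):

#include <stdint.h>
#include <zlib.h>

/* Inflate `size` bytes from src into dst; *len holds the capacity on entry
 * and the number of bytes produced on return. Returns Z_OK on success. */
static int inflate_buffer(uint8_t *dst, unsigned long *len,
                          const uint8_t *src, int size)
{
    z_stream zstream = { 0 };
    int zret;

    zstream.next_in   = (uint8_t *)src;
    zstream.avail_in  = size;
    zstream.next_out  = dst;
    zstream.avail_out = *len;

    zret = inflateInit(&zstream);
    if (zret != Z_OK)
        return zret;
    zret = inflate(&zstream, Z_SYNC_FLUSH);   /* single pass over the buffer */
    inflateEnd(&zstream);
    *len = zstream.total_out;
    return zret == Z_STREAM_END ? Z_OK : zret;
}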
505     int strip_start, int is_yuv)
508     unsigned long outlen;
510     outlen = width * lines;
521     ret = tiff_uncompress(zbuf, &outlen, src, size);
524     "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
525     (unsigned long)width * lines, ret);
530     for (line = 0; line < lines; line++) {
552     lzma_stream stream = LZMA_STREAM_INIT;
556     stream.avail_in = size;
557     stream.next_out = dst;
558     stream.avail_out = *len;
559     ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
560     if (ret != LZMA_OK) {
564     ret = lzma_code(&stream, LZMA_RUN);
566     *len = stream.total_out;
567     return ret == LZMA_STREAM_END ? LZMA_OK : ret;
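The liblzma path follows the same shape; here is a self-contained sketch of that single-shot decode, including the lzma_end() cleanup that the excerpt skips over (the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <lzma.h>

/* Decode an xz/LZMA2 stream in one pass; *len is the capacity on entry and
 * the number of bytes produced on return. Returns LZMA_OK on success. */
static lzma_ret lzma_decode_buffer(uint8_t *dst, uint64_t *len,
                                   const uint8_t *src, size_t size)
{
    lzma_stream strm = LZMA_STREAM_INIT;
    lzma_ret ret;

    strm.next_in   = src;
    strm.avail_in  = size;
    strm.next_out  = dst;
    strm.avail_out = *len;

    ret = lzma_stream_decoder(&strm, UINT64_MAX, 0);  /* no memory limit, no flags */
    if (ret != LZMA_OK)
        return ret;
    ret = lzma_code(&strm, LZMA_RUN);
    lzma_end(&strm);
    *len = strm.total_out;
    return ret == LZMA_STREAM_END ? LZMA_OK : ret;
}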
572     int strip_start, int is_yuv)
574     uint64_t outlen = width * (uint64_t)lines;
586     ret = tiff_uncompress_lzma(buf, &outlen, src, size);
587     if (ret != LZMA_OK) {
589     "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
590     (uint64_t)width * lines, ret);
595     for (line = 0; line < lines; line++) {
627 for (line = 0; line < lines; line++) {
635     int tile_byte_count, int dst_x, int dst_y, int w, int h)
640     int is_single_comp, is_u16, pixel_size;
655     mjpegdecctx->bayer = 1;
675     is_u16 = (s->bpp > 8);
697     pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
699     if (is_single_comp && !is_u16) {
705     dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
706     dst_data = frame->data[0] + dst_offset * pixel_size;
787 return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
788 strip_start, is_yuv);
791 "zlib support not enabled, " 792 "deflate compression not supported\n");
798 return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
799 strip_start, is_yuv);
802 "LZMA support not enabled\n");
812 if (size > 1 && !src[0] && (src[1]&1)) {
819 for (line = 0; line < lines; line++) {
821 if (pixels < width) {
863 if (is_dng && stride == 0)
866 for (line = 0; line < lines; line++) {
867 if (src - ssrc > size) {
877 if (ssrc + size - src < width)
882 dst, 1, src, 0, width, 0);
885     for (i = 0; i < width; i++)
891     int is_u16, pixel_size_bytes, pixel_size_bits, elements;
894     pixel_size_bits = (is_u16 ? 16 : 8);
895     pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
897     elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount;
913     for (pixels = 0; pixels < width;) {
914         if (ssrc + size - src < 2) {
921         if (pixels + code > width ||
922             ssrc + size - src < code) {
924             "Copy went out of bounds\n");
928             dst, 1, src, 0, code, pixels);
931         } else if (code != -128) {
933             if (pixels + code > width) {
935                 "Run went out of bounds\n");
940                 dst, 0, NULL, c, code, pixels);
946     for (i = 0; i < width; i++)
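The TIFF_PACKBITS branch above implements the classic PackBits RLE; a standalone decoder sketch of the same scheme (without the decoder's horizontal_fill() expansion step; the names are illustrative):

#include <stdint.h>
#include <string.h>

/* PackBits: a signed count byte n is followed either by n+1 literal bytes
 * (n >= 0) or by one byte to repeat 1-n times (-127 <= n < 0); n == -128 is
 * a no-op. Returns bytes written, or -1 on malformed input. */
static int packbits_decode(uint8_t *dst, int dst_size,
                           const uint8_t *src, int src_size)
{
    int out = 0, in = 0;
    while (in < src_size && out < dst_size) {
        int8_t n = (int8_t)src[in++];
        if (n >= 0) {                       /* copy n + 1 literal bytes */
            int count = n + 1;
            if (in + count > src_size || out + count > dst_size)
                return -1;                  /* "Copy went out of bounds" */
            memcpy(dst + out, src + in, count);
            in  += count;
            out += count;
        } else if (n != -128) {             /* repeat the next byte 1 - n times */
            int count = 1 - n;
            if (in >= src_size || out + count > dst_size)
                return -1;                  /* "Run went out of bounds" */
            memset(dst + out, src[in++], count);
            out += count;
        }
    }
    return out;
}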
967 int tile_offset_offset, tile_offset;
968 int tile_byte_count_offset, tile_byte_count;
969 int tile_count_x, tile_count_y;
971 int has_width_leftover, has_height_leftover;
972 int tile_x = 0, tile_y = 0;
973 int pos_x = 0, pos_y = 0;
990     for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
991         tile_x = tile_idx % tile_count_x;
992         tile_y = tile_idx / tile_count_x;
994         if (has_width_leftover && tile_x == tile_count_x - 1)
999         if (has_height_leftover && tile_y == tile_count_y - 1)
1018        ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1025 if (tile_x == tile_count_x - 1) {
1041 int create_gray_palette = 0;
1046 "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1061 create_gray_palette = 1;
1151 "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1175 "This format is not supported (bpp=%d, bppcount=%d)\n",
1198 if (!create_gray_palette)
1203    uint32_t *pal = (uint32_t *)frame->f->data[1];
1204    for (i = 0; i < 1<<s->bpp; i++)
1205        pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
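The loop above synthesizes a grayscale palette by scaling each index to 0-255 and replicating it across R, G and B; the same computation in isolation (the bit depth chosen here is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { BPP = 4 };                       /* illustrative bit depth: 16 entries */
    uint32_t pal[1 << BPP];
    for (int i = 0; i < 1 << BPP; i++)
        pal[i] = 0xFFU << 24 | i * 255 / ((1 << BPP) - 1) * 0x010101;
    printf("%08x\n", (unsigned)pal[(1 << BPP) - 1]);   /* last entry: opaque white */
    return 0;
}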
1214    s->res[offset++] = num;
1216    if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1217        uint64_t num = s->res[2] * (uint64_t)s->res[1];
1218        uint64_t den = s->res[0] * (uint64_t)s->res[3];
1219 if (num > INT64_MAX || den > INT64_MAX) {
1224 num, den, INT32_MAX);
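The resolution handling above appears to feed the num/den pair into av_reduce() with an INT32_MAX bound; a minimal sketch of that reduction step under that assumption (the helper name is illustrative):

#include <stdint.h>
#include <libavutil/rational.h>

/* Reduce an oversized num/den pair so it fits the int fields of AVRational. */
static AVRational reduced_sar(uint64_t num, uint64_t den)
{
    AVRational sar = { 0, 1 };
    if (num && den && num <= INT64_MAX && den <= INT64_MAX)
        av_reduce(&sar.num, &sar.den, num, den, INT32_MAX);
    return sar;
}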
1288 if (count > 5 || count <= 0) {
1290 "This format is not supported (bpp=%d, %d components)\n",
1305    for (i = 0; i < count; i++)
1316 "Samples per pixel requires a single value, many provided\n");
1319 if (value > 5 || value <= 0) {
1321 "Invalid samples per pixel %d\n", value);
1368    if (!value || (type == TIFF_LONG && value == UINT_MAX))
1374 if (value > INT_MAX) {
1376 "strippos %u too large\n", value);
1390 if (value > INT_MAX) {
1392 "stripsize %u too large\n", value);
1406 set_sar(s, tag, value, value2);
1434    for (int i = 0; i < count; i++)
1459    if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1491 "PhotometricInterpretation 0x%04X",
1496 "unknown\n", value);
1503 "Unknown FillOrder value %d, trying default one\n", value);
1511 if (count / 3 > 256 ||
1515    pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1525 for (i = 0; i < count / 3; i++) {
1526 uint32_t p = 0xFF000000;
1527        p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1528        p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1529        p |=  ff_tget(&pal_gb[2], type, s->le) >> off;
1543    for (i = 0; i < count; i++) {
1560 #define ADD_METADATA(count, name, sep)\
1561     if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1562         av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1609    if (count >= INT_MAX / sizeof(int64_t))
1618    for (i = 0; i < count; i++)
1721 unsigned int ver[4];
1728 ver[0], ver[1], ver[2], ver[3]);
1743 "Unknown or unsupported tag %d/0x%0X\n",
1751 "This format is not supported (bpp=%d, %d components)\n",
1766 unsigned off, last_off;
1769 unsigned soff, ssize;
1773 int retry_for_subifd, retry_for_page;
1775 int has_tile_bits, has_strip_bits;
1783    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1804 for (i = 0; i < 65536; i++)
1816 for (i = 0; i < entries; i++) {
1832 if (retry_for_page) {
1835    } else if (retry_for_subifd) {
1840 if (retry_for_subifd || retry_for_page) {
1845 if (off <= last_off) {
1849    if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1884 if (bps < 8 || bps > 32)
1891    av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1908 if (has_tile_bits && has_strip_bits) {
1909    int tiled_dng = s->is_tiled && is_dng;
1919    if (!s->is_tiled || has_strip_bits) {
1970    for (plane = 0; plane < planes; plane++) {
1972        int remaining = avpkt->size;
1975        dst = p->data[plane];
1978        stride = stride * 5 / 4;
1984        for (i = 0; i < s->height; i += s->rps) {
1997            if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2019        dst = five_planes ? five_planes : p->data[plane];
2023        ssize = s->width * soff;
2030 for (i = 0; i < decoded_height; i++) {
2031 for (j = soff; j < ssize; j += 2)
2041 for (i = 0; i < decoded_height; i++) {
2042 for (j = soff; j < ssize; j += 2)
2047 for (i = 0; i < decoded_height; i++) {
2048 for (j = soff; j < ssize; j++)
2049 dst[j] += dst[j - soff];
2057        dst = p->data[plane];
2058        for (i = 0; i < s->height; i++) {
2059            for (j = 0; j < stride; j++)
2060 dst[j] = c - dst[j];
2069        dst = p->data[plane];
2070        for (i = 0; i < s->height; i++) {
2071            for (j = 0; j < s->width; j++) {
2072 int k = 255 - src[x * j + 3];
2073 int r = (255 - src[x * j ]) * k;
2074 int g = (255 - src[x * j + 1]) * k;
2075 int b = (255 - src[x * j + 2]) * k;
2076 dst[4 * j ] = r * 257 >> 16;
2077 dst[4 * j + 1] = g * 257 >> 16;
2078 dst[4 * j + 2] = b * 257 >> 16;
2087        dst = p->data[plane];
2088        for (i = 0; i < s->height; i++) {
2089            for (j = 0; j < s->width; j++) {
2090                uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2091                uint64_t r = (65535 - AV_RB16(dst + 8 * j    )) * k;
2092                uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2093                uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2094 AV_WB16(dst + 8 * j , r * 65537 >> 32);
2095 AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2096 AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2097 AV_WB16(dst + 8 * j + 6, 65535);
2112    uint16_t *dst = (uint16_t *)p->data[0];
2114    for (j = 0; j < s->width; j++)
2183 #define OFFSET(x) offsetof(TiffContext, x)
2209     .priv_class = &tiff_decoder_class,
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, uint16_t black_level, float scale_factor)
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int shift(int a, int b)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
This structure describes decoded (raw) audio or video data.
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values ...
int dct_algo
DCT algorithm, see FF_DCT_* below.
"Linear transfer characteristics"
8 bits gray, 8 bits alpha
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define LIBAVUTIL_VERSION_INT
packed RGB 8:8:8, 24bpp, RGBRGB...
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
static av_cold int init(AVCodecContext *avctx)
const uint8_t ff_reverse[256]
#define avpriv_request_sample(...)
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
TIFF constants & data structures.
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
const char * av_default_item_name(void *ptr)
Return the context name.
av_cold void ff_lzw_decode_close(LZWState **p)
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
av_cold void ff_lzw_decode_open(LZWState **p)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static void free_geotags(TiffContext *const s)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
#define FF_ARRAY_ELEMS(a)
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
AV_RB16 (byte-access macro: read a 16-bit big-endian value)
unsigned int yuv_line_size
AV_RL16 (byte-access macro: read a 16-bit little-endian value)
static const struct @322 planes[]
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Macro definitions for various function/variable attributes.
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
#define AV_PIX_FMT_GRAY12
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
planar GBRA 4:4:4:4 64bpp, big-endian
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, uint16_t black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5) ...
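A hedged sketch of that mapping, combining the optional lookup table, black-level subtraction and the value_norm scaling quoted at line 294 of the listing above; everything beyond those quoted steps is an assumption, not the decoder's exact code:

#include <stdint.h>

static uint16_t clip_uint16(int64_t v)
{
    return v < 0 ? 0 : v > 65535 ? 65535 : (uint16_t)v;
}

/* Map one raw sensor sample to a linear 16-bit reference value. */
static uint16_t process_color16(uint16_t value, const uint16_t *lut,
                                uint16_t black_level, float scale_factor)
{
    int v = lut ? lut[value] : value;             /* optional linearization table   */
    v -= black_level;                             /* assumed black-level subtraction */
    if (v < 0)
        v = 0;
    float value_norm = (float)v * scale_factor;   /* cf. line 294 above             */
    return clip_uint16((int64_t)(value_norm * 65535.0f));
}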
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
8 bits with AV_PIX_FMT_RGB32 palette
#define TIFF_GEO_KEY_USER_DEFINED
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Multithreading support functions.
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
static av_cold int tiff_init(AVCodecContext *avctx)
Structure to hold side data for an AVFrame.
planar GBR 4:4:4 48bpp, big-endian
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
#define AVERROR_EOF
End of file.
bitstream reader API header.
AVDictionary * metadata
metadata.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define AV_PIX_FMT_BAYER_GRBG16
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
16 bits gray, 16 bits alpha (big-endian)
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16)
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static av_cold int tiff_end(AVCodecContext *avctx)
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
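The DNG tile path drives a nested JPEG decoder through this send/receive pair; a generic sketch of that call sequence (the codec selection and error handling here are illustrative, not the decoder's own):

#include <libavcodec/avcodec.h>

/* Decode one self-contained packet with a freshly opened decoder;
 * the caller supplies an allocated AVFrame to receive the picture. */
static int decode_one_packet(enum AVCodecID id, AVPacket *pkt, AVFrame *out)
{
    AVCodec *codec = avcodec_find_decoder(id);
    if (!codec)
        return AVERROR(EINVAL);
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    int ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);    /* feed the compressed data   */
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, out);  /* pull the decoded picture   */

    avcodec_free_context(&ctx);
    return ret;
}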
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a value of the given TIFF data type from the bytestream using given endianness.
int flags
AV_CODEC_FLAG_*.
const char * name
Name of the codec implementation.
AVCodecContext * avctx_mjpeg
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
Clip a signed integer value into the 0-65535 range.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
enum TiffPhotometric photometric
uint8_t nb_components
The number of components each pixel has (1-4)
enum AVPictureType pict_type
Picture type of the frame.
static const AVOption tiff_options[]
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define AV_PIX_FMT_GRAY16
static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int width
picture width / height.
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
static const ElemCat * elements[ELEMENT_COUNT]
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
#define AV_EF_EXPLODE
abort decoding on minor error detection
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
#define AV_PIX_FMT_BAYER_BGGR16
#define TIFF_GEO_KEY_UNDEFINED
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
static av_always_inline int bytestream2_tell(GetByteContext *g)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
#define AV_PIX_FMT_BAYER_GBRG16
TiffType
TIFF types in ascending priority (last in the list is highest)
#define AV_LOG_INFO
Standard information.
static const char * get_geokey_name(int key)
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
char * av_strdup(const char *s)
Duplicate a string.
Libavcodec external API header.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
#define AV_OPT_FLAG_VIDEO_PARAM
main external API structure.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Describe the class of an AVClass context structure.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Rational number (pair of numerator and denominator).
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Digital Negative (DNG) image part of an CinemaDNG image sequence.
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
static int get_geokey_type(int key)
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const TiffGeoTagKeyName tiff_projection_codes[]
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
#define AV_PIX_FMT_BAYER_RGGB16
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int tile_byte_counts_offset
common internal api header.
static char * get_geokey_val(int key, int val)
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
planar GBRA 4:4:4:4 32bpp
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
static char * doubles2str(double *dp, int count, const char *sep)
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
#define RET_GEOKEY_VAL(TYPE, array)
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Digital Negative (DNG) image.
Y , 16bpp, little-endian.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int key_frame
1 -> keyframe, 0-> not
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
16 bits gray, 16 bits alpha (little-endian)
int flags2
AV_CODEC_FLAG2_*.
static const AVClass tiff_decoder_class
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values for that tag.
#define RET_GEOKEY(TYPE, array, element)
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset.
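Together with ff_tread_tag() and ff_tget_short() listed above, a minimal sketch of enumerating the first IFD with these internal helpers (buildable only inside the FFmpeg tree; error handling trimmed):

#include <stdio.h>
#include "libavcodec/bytestream.h"
#include "libavcodec/tiff_common.h"

/* Enumerate the tags of the first IFD in a TIFF buffer. */
static int list_first_ifd(const uint8_t *buf, int size)
{
    GetByteContext gb;
    int le, ifd_offset, next, ret;
    unsigned tag, type, count;

    bytestream2_init(&gb, buf, size);
    if ((ret = ff_tdecode_header(&gb, &le, &ifd_offset)) < 0)
        return ret;                                   /* byte order + first IFD offset */
    bytestream2_seek(&gb, ifd_offset, SEEK_SET);

    int entries = ff_tget_short(&gb, le);             /* number of directory entries */
    for (int i = 0; i < entries; i++) {
        if ((ret = ff_tread_tag(&gb, le, &tag, &type, &count, &next)) < 0)
            return ret;
        /* ... inspect tag / type / count here ... */
        bytestream2_seek(&gb, next, SEEK_SET);        /* step past this 12-byte entry */
    }
    return 0;
}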
static int init_image(TiffContext *s, ThreadFrame *frame)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
enum AVColorTransferCharacteristic color_trc
planar GBR 4:4:4 48bpp, little-endian
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
static int cmp_id_key(const void *id, const void *k)
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
planar GBRA 4:4:4:4 64bpp, little-endian
#define ADD_METADATA(count, name, sep)
AV_RL32 (byte-access macro: read a 32-bit little-endian value)
static double val(void *priv, double ch)
This structure stores compressed data.
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators...
CCITT Fax Group 3 and 4 decompression.
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
void * av_mallocz_array(size_t nmemb, size_t size)