#include "config_components.h"

/* Masks used by the Adam7 interlacing passes: */

/* which pixels are valid in a pass */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};

/* which y lines can be written in a pass */
static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
    0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
};

/* which pixels to overwrite while displaying */
static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
    0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;
    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F >> j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2 * (src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F >> j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4 * (x & 1);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4 * (src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F >> j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        bpp = bits_per_pixel >> 3;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                /* ... copy one bpp-byte pixel from the pass row ... */
            }
            if ((mask << j) & 0x80)
                /* ... advance the source position ... */;
        }
        break;
    }
}
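/*
 * Standalone sketch (not part of this decoder, compiles as a separate
 * program) of how the Adam7 pass masks above are consumed: bit (x & 7) of
 * the 8-bit mask decides whether the current pass touches pixel column x.
 * The table values are the png_pass_dsp_mask entries shown above; all other
 * names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    static const uint8_t dsp_mask[7] = {
        0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
    };
    for (int pass = 0; pass < 7; pass++) {
        printf("pass %d writes x %% 8 in {", pass);
        for (int x = 0; x < 8; x++)
            if ((dsp_mask[pass] << (x & 7)) & 0x80)
                printf(" %d", x);
        printf(" }\n");
    }
    return 0;
}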
#define UNROLL1(bpp, op)                                            \
    {                                                               \
        r = dst[0];                                                 \
        if (bpp >= 2) g = dst[1];                                   \
        if (bpp >= 3) b = dst[2];                                   \
        if (bpp >= 4) a = dst[3];                                   \
        for (; i <= size - bpp; i += bpp) {                         \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]);        \
            if (bpp == 1) continue;                                 \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]);        \
            if (bpp == 2) continue;                                 \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]);        \
            if (bpp == 3) continue;                                 \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]);        \
        }                                                           \
    }

#define UNROLL_FILTER(op)                                           \
    if (bpp == 1) {                                                 \
        UNROLL1(1, op)                                              \
    } else if (bpp == 2) {                                          \
        UNROLL1(2, op)                                              \
    } else if (bpp == 3) {                                          \
        UNROLL1(3, op)                                              \
    } else if (bpp == 4) {                                          \
        UNROLL1(4, op)                                              \
    }                                                               \
    for (; i < size; i++) {                                         \
        dst[i] = op(dst[i - bpp], src[i], last[i]);                 \
    }
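/*
 * Hedged illustration only: what the UNROLL1()/UNROLL_FILTER() pattern above
 * amounts to for the SUB predictor with bpp == 3 (assumes size >= 3).
 * Carrying the running predictor in r/g/b lets the compiler keep it in
 * registers instead of re-reading dst[i - bpp] every iteration.
 */
#include <stdint.h>

static void sub_unrolled_bpp3(uint8_t *dst, const uint8_t *src, int size)
{
    int i;
    unsigned r = dst[0] = src[0];   /* first pixel has no left neighbour */
    unsigned g = dst[1] = src[1];
    unsigned b = dst[2] = src[2];
    for (i = 3; i <= size - 3; i += 3) {
        dst[i + 0] = r = r + src[i + 0];
        dst[i + 1] = g = g + src[i + 1];
        dst[i + 2] = b = b + src[i + 2];
    }
    for (; i < size; i++)           /* tail, if size is not a multiple of 3 */
        dst[i] = dst[i - 3] + src[i];
}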
/* NOTE: 'dst' may be equal to 'last' */
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                       const uint8_t *src, const uint8_t *last, int size, int bpp)
{
    int i, p, r, g, b, a;

    switch (filter_type) {
    /* ... PNG_FILTER_VALUE_NONE: plain copy ... */
    case PNG_FILTER_VALUE_SUB:
        for (i = 0; i < bpp; i++)
            dst[i] = src[i];
        if (bpp == 4) {
            p = *(int *)dst;
            for (; i < size; i += bpp) {
                unsigned s = *(const int *)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int *)(dst + i) = p;
            }
        } else {
#define OP_SUB(x, s, l) ((x) + (s))
            UNROLL_FILTER(OP_SUB);
        }
        break;
    /* ... PNG_FILTER_VALUE_UP: dsp->add_bytes_l2(dst, src, last, size) ... */
    case PNG_FILTER_VALUE_AVG:
        for (i = 0; i < bpp; i++) {
            p      = (last[i] >> 1);
            dst[i] = p + src[i];
        }
#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for (i = 0; i < bpp; i++) {
            p      = last[i];
            dst[i] = p + src[i];
        }
        if (bpp > 2 && size > 4) {
            /* ... hand the bulk of the row to dsp->add_paeth_prediction() ... */
        }
        /* ... finish the tail with the C Paeth predictor ... */
        break;
    }
}
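/*
 * Standalone sketch of the per-row de-filtering the code above implements,
 * minus the unrolling and the DSP fast paths.  These are just the reference
 * recurrences from the PNG specification; names are illustrative.
 */
#include <stdint.h>
#include <stdlib.h>

static int paeth(int a, int b, int c)
{
    int p  = a + b - c;
    int pa = abs(p - a), pb = abs(p - b), pc = abs(p - c);
    if (pa <= pb && pa <= pc) return a;
    return pb <= pc ? b : c;
}

/* dst/src: current row (size bytes); last: previous de-filtered row */
static void defilter_row(int filter, uint8_t *dst, const uint8_t *src,
                         const uint8_t *last, int size, int bpp)
{
    for (int i = 0; i < size; i++) {
        int a = i >= bpp ? dst[i - bpp]  : 0;   /* left    */
        int b = last[i];                        /* up      */
        int c = i >= bpp ? last[i - bpp] : 0;   /* up-left */
        switch (filter) {
        case 0: dst[i] = src[i];                  break; /* None    */
        case 1: dst[i] = src[i] + a;              break; /* Sub     */
        case 2: dst[i] = src[i] + b;              break; /* Up      */
        case 3: dst[i] = src[i] + ((a + b) >> 1); break; /* Average */
        case 4: dst[i] = src[i] + paeth(a, b, c); break; /* Paeth   */
        }
    }
}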
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size - 2; i += 3 + alpha) { \
        int g = dst[i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8,  uint8_t)
YUV2RGB(rgb16, uint16_t)
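/*
 * What the deloco_*() helpers generated by YUV2RGB() undo: with
 * PNG_FILTER_TYPE_LOCO (the MNG extension), R and B are stored as
 * differences against G, so reconstruction adds G back modulo the sample
 * size.  A tiny standalone demonstration, illustrative only:
 */
#include <stdint.h>

static void deloco_demo(void)
{
    /* stored (R-G, G, B-G) for a pixel that is really (200, 100, 50) */
    uint8_t px[3] = { (uint8_t)(200 - 100), 100, (uint8_t)(50 - 100) };
    uint8_t g = px[1];
    px[0] += g;   /* 100 + 100          -> 200 */
    px[2] += g;   /* 206 + 100 mod 256  -> 50  */
}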
static int percent_missing(PNGDecContext *s)
{
    if (s->interlace_type) {
        /* ... return an estimate based on the remaining Adam7 passes ... */
    } else {
        return 100 - 100 * s->y / s->cur_h;
    }
}
/* process exactly one decompressed row */
static void png_handle_row(PNGDecContext *s, uint8_t *dst, ptrdiff_t dst_stride)
{
    uint8_t *ptr, *last_row;

    if (!s->interlace_type) {
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
        if (s->y == 0)
            last_row = s->last_row;
        else
            last_row = ptr - dst_stride;

        ff_png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
                          last_row, s->row_size, s->bpp);
        /* LOCO de-correlation lags one row behind the filtering */
        if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
            if (s->bit_depth == 16)
                deloco_rgb16((uint16_t *)(ptr - dst_stride), s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            else
                deloco_rgb8(ptr - dst_stride, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
        }
        s->y++;
        if (s->y == s->cur_h) {
            /* ... image complete; run the deferred LOCO pass on the last row ... */
            if (s->bit_depth == 16)
                deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            else
                deloco_rgb8(ptr, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
        }
    } else {
        /* Adam7: de-filter the compact pass row, then scatter it into the frame */
        for (;;) {
            ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
            /* ... if this y carries data for the current pass: */
            ff_png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
                              s->last_row, s->pass_row_size, s->bpp);
            FFSWAP(uint8_t *, s->last_row, s->tmp_row);
            FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
            /* ... if this y is displayed by the current pass: */
            png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
                                   s->color_type, s->last_row);
            s->y++;
            if (s->y == s->cur_h) {
                memset(s->last_row, 0, s->row_size);
                /* ... advance to the next pass and recompute its row size ... */
                s->crow_size = s->pass_row_size + 1;
                if (s->pass_row_size != 0)
                    break;
                /* skip passes that contain no pixels */
            }
        }
    }
}
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb,
                           uint8_t *dst, ptrdiff_t dst_stride)
{
    z_stream *const zstream = &s->zstream.zstream;
    int ret;

    zstream->next_in  = gb->buffer;
    zstream->avail_in = bytestream2_get_bytes_left(gb);

    /* decode one line if possible */
    while (zstream->avail_in > 0) {
        ret = inflate(zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            /* ... report the zlib error and bail out ... */
            return AVERROR_EXTERNAL;
        }
        if (zstream->avail_out == 0) {
            /* ... hand the completed row to png_handle_row() ... */
            zstream->avail_out = s->crow_size;
            zstream->next_out  = s->crow_buf;
        }
        if (ret == Z_STREAM_END && zstream->avail_in > 0) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "%d undecompressed bytes left in buffer\n", zstream->avail_in);
            return 0;
        }
    }
    return 0;
}
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end, void *logctx)
{
    FFZStream z;
    z_stream *const zstream = &z.zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret;

    /* ... ff_inflate_init() and av_bprint_init() ... */

    zstream->next_in  = data;
    zstream->avail_in = data_end - data;

    while (zstream->avail_in > 0) {
        /* grow the AVBPrint and inflate into the spare space */
        av_bprint_get_buffer(bp, 2, &buf, &buf_size);
        /* ... */
        zstream->next_out  = buf;
        zstream->avail_out = buf_size - 1;
        ret = inflate(zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            /* ... */
        }
        bp->len += zstream->next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    /* ... ff_inflate_end() ... */
    bp->str[bp->len] = 0;
    return 0;
}
static char *iso88591_to_utf8(const char *in, size_t size_in)
{
    size_t extra = 0, i;
    char *out, *q;

    for (i = 0; i < size_in; i++)
        extra += !!(in[i] & 0x80);
    if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
        return NULL;
    q = out = av_malloc(size_in + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < size_in; i++) {
        if (in[i] & 0x80) {
            *(q++) = 0xC0 | (in[i] >> 6);
            *(q++) = 0x80 | (in[i] & 0x3F);
        } else {
            *(q++) = in[i];
        }
    }
    *(q++) = 0;
    return out;
}
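/*
 * Standalone sketch of the ISO 8859-1 -> UTF-8 expansion performed above:
 * every byte >= 0x80 becomes the two-byte sequence 0xC2/0xC3 + continuation
 * byte (0xC0 | hi bits, 0x80 | low 6 bits).  Caller frees the result; all
 * names here are illustrative.
 */
#include <stdlib.h>

static char *latin1_to_utf8(const unsigned char *in, size_t len)
{
    size_t extra = 0;
    for (size_t i = 0; i < len; i++)
        extra += in[i] >= 0x80;
    char *out = malloc(len + extra + 1);
    if (!out)
        return NULL;
    unsigned char *q = (unsigned char *)out;
    for (size_t i = 0; i < len; i++) {
        if (in[i] < 0x80) {
            *q++ = in[i];
        } else {
            *q++ = 0xC0 | (in[i] >> 6);
            *q++ = 0x80 | (in[i] & 0x3F);
        }
    }
    *q = 0;
    return out;
}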
static int decode_text_to_exif(PNGDecContext *s, const char *txt_utf8)
{
    size_t len = strlen(txt_utf8);
    const char *ptr = txt_utf8;
    const char *end = txt_utf8 + len;
    size_t exif_len = 0;
    uint8_t *exif_ptr;
    const uint8_t *exif_end;

    /* skip the first line of the payload */
    while (*ptr++ != '\n') {
        /* ... bail out if the end of the text is reached first ... */
    }
    if (end - ptr < 4 || strncmp("exif", ptr, 4))
        return AVERROR_INVALIDDATA;
    /* ... accumulate the decimal payload length digit by digit: */
    size_t nlen = exif_len * 10 + (*ptr - '0');
    /* ... */
    if ((exif_len & ~SIZE_MAX) || end - ptr < 2 * exif_len)
        return AVERROR_INVALIDDATA;
    /* ... allocate s->exif_data, then skip the hex-encoded "Exif " prefix ... */
    ptr += strlen("Exif ") * 2 - 1;

    exif_ptr = s->exif_data->data;
    exif_end = exif_ptr + s->exif_data->size;
    while (exif_ptr < exif_end) {
        /* high nibble: scan forward to the next hex digit */
        while (++ptr < end) {
            if (*ptr >= '0' && *ptr <= '9') {
                *exif_ptr = (*ptr - '0') << 4;
                break;
            }
            if (*ptr >= 'a' && *ptr <= 'f') {
                *exif_ptr = (*ptr - 'a' + 10) << 4;
                break;
            }
        }
        /* low nibble */
        while (++ptr < end) {
            if (*ptr >= '0' && *ptr <= '9') {
                *exif_ptr += *ptr - '0';
                break;
            }
            if (*ptr >= 'a' && *ptr <= 'f') {
                *exif_ptr += *ptr - 'a' + 10;
                break;
            }
        }
        exif_ptr++;
    }
    /* ... */
    return 0;
}
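/*
 * Sketch of the hex decoding loop above: the "Raw profile type exif" payload
 * is hex text, so each output byte is assembled from the next two hex digits
 * while everything else (newlines, spaces) is skipped.  Illustrative helper,
 * not the decoder's API.
 */
#include <stddef.h>
#include <stdint.h>

static int hexval(int c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return -1;
}

static size_t parse_hex_blob(const char *p, const char *end,
                             uint8_t *out, size_t out_size)
{
    size_t n = 0;
    int hi = -1;
    for (; p < end && n < out_size; p++) {
        int v = hexval(*p);
        if (v < 0)
            continue;               /* skip separators */
        if (hi < 0) {
            hi = v;                 /* first digit of the pair */
        } else {
            out[n++] = (hi << 4) | v;
            hi = -1;
        }
    }
    return n;
}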
static int decode_text_chunk(PNGDecContext *s, GetByteContext *gb, int compressed)
{
    const uint8_t *data     = gb->buffer;
    const uint8_t *data_end = gb->buffer_end;
    const char *keyword     = data;
    const char *keyword_end = memchr(keyword, 0, data_end - data);
    char *kw_utf8 = NULL, *txt_utf8 = NULL;
    const char *text;
    unsigned text_len;

    if (!keyword_end)
        return AVERROR_INVALIDDATA;
    data = keyword_end + 1;

    if (compressed) {
        /* zTXt: a method byte follows the keyword, then a deflated payload
         * expanded with decode_zbuf() */
        if (data == data_end)
            return AVERROR_INVALIDDATA;
        /* ... */
    } else {
        text     = data;
        text_len = data_end - data;
    }
    /* ... convert keyword and text with iso88591_to_utf8() ... */
    if (!strcmp(kw_utf8, "Raw profile type exif")) {
        /* ... hand the text to decode_text_to_exif() ... */
    }
    /* ... otherwise store the pair in s->frame_metadata via av_dict_set() ... */
    return 0;
}
684 s->width =
s->cur_w = bytestream2_get_be32(gb);
685 s->height =
s->cur_h = bytestream2_get_be32(gb);
687 s->cur_w =
s->cur_h =
s->width =
s->height = 0;
691 s->bit_depth = bytestream2_get_byte(gb);
692 if (
s->bit_depth != 1 &&
s->bit_depth != 2 &&
s->bit_depth != 4 &&
693 s->bit_depth != 8 &&
s->bit_depth != 16) {
697 s->color_type = bytestream2_get_byte(gb);
698 s->compression_type = bytestream2_get_byte(gb);
699 if (
s->compression_type) {
703 s->filter_type = bytestream2_get_byte(gb);
704 s->interlace_type = bytestream2_get_byte(gb);
708 "compression_type=%d filter_type=%d interlace_type=%d\n",
709 s->width,
s->height,
s->bit_depth,
s->color_type,
710 s->compression_type,
s->filter_type,
s->interlace_type);
714 s->cur_w =
s->cur_h =
s->width =
s->height = 0;
static int populate_avctx_color_fields(AVCodecContext *avctx, AVFrame *frame)
{
    PNGDecContext *s = avctx->priv_data;
    int ret;

    if (s->have_cicp) {
        /* ... validate and apply s->cicp_primaries / s->cicp_trc ... */
        if (s->cicp_range == 0) {
            avctx->color_range = frame->color_range = AVCOL_RANGE_MPEG;
        } else if (s->cicp_range != 1) {
            /* ... invalid range value: reject ... */
        }
    } else if (s->iccp_data) {
        AVFrameSideData *sd;
        ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE,
                                     s->iccp_data_len, &sd);
        if (ret < 0)
            return ret;
        if (sd)
            memcpy(sd->data, s->iccp_data, s->iccp_data_len);
    } else if (s->have_srgb) {
        avctx->color_primaries = frame->color_primaries = AVCOL_PRI_BT709;
        avctx->color_trc       = frame->color_trc       = AVCOL_TRC_IEC61966_2_1;
    } else if (s->have_chrm) {
        /* ... build primaries/white point from the cHRM values and match them
         * with av_csp_primaries_id_from_desc() ... */
    }

    /* these chunks override gAMA */
    if (s->iccp_data || s->have_srgb || s->have_cicp) {
        /* ... drop any gAMA-derived value ... */
    } else if (s->gamma) {
        /* gAMA is the file gamma scaled by 100000; recognize common values */
        if (s->gamma > 45355 && s->gamma < 45555)            /* ~1/2.2 */
            avctx->color_trc = frame->color_trc = AVCOL_TRC_GAMMA22;
        else if (s->gamma > 35614 && s->gamma < 35814)       /* ~1/2.8 */
            avctx->color_trc = frame->color_trc = AVCOL_TRC_GAMMA28;
        else if (s->gamma > 38362 && s->gamma < 38562)       /* ~1/2.6 */
            avctx->color_trc = frame->color_trc = AVCOL_TRC_SMPTE428;
        else if (s->gamma > 99900 && s->gamma < 100100)      /* ~1.0   */
            avctx->color_trc = frame->color_trc = AVCOL_TRC_LINEAR;
    }

    /* PNG only supports RGB */
    avctx->colorspace = frame->colorspace = AVCOL_SPC_RGB;
    if (!s->have_cicp || s->cicp_range == 1)
        avctx->color_range = frame->color_range = AVCOL_RANGE_JPEG;

    if (!s->has_trns && s->significant_bits > 0)
        avctx->bits_per_raw_sample = s->significant_bits;

    if (s->clli_max || s->clli_avg) {
        AVContentLightMetadata *clli;
        ret = ff_decode_content_light_new(avctx, frame, &clli);
        if (ret < 0)
            return ret;
        if (clli) {
            /* cLLI stores both values multiplied by 10000 */
            clli->MaxCLL  = s->clli_max / 10000;
            clli->MaxFALL = s->clli_avg / 10000;
        }
    }

    /* mDCV handling: ff_decode_mastering_display_new(), then per primary: */
    for (int i = 0; i < 3; i++) {
        /* ... convert s->mdcv_primaries[i][0..1] to AVRational coordinates ... */
    }
    /* ... white point and min/max luminance from s->mdcv_* ... */

    return 0;
}
    /* decode_idat_chunk(): choose the output pixel format from IHDR */
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    /* ... once IHDR (and PLTE where required) have been parsed: */
    s->bits_per_pixel = s->bit_depth * s->channels;
    s->bpp            = (s->bits_per_pixel + 7) >> 3;
    s->row_size       = (s->cur_w * s->bits_per_pixel + 7) >> 3;

    if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
        s->color_type == PNG_COLOR_TYPE_RGB) {
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
               s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_RGBA;
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
               s->color_type == PNG_COLOR_TYPE_GRAY) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_GRAY) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_RGB) {
        avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
    } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 ||
                s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
               s->color_type == PNG_COLOR_TYPE_PALETTE) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    /* ... 1-bit grayscale maps to AV_PIX_FMT_MONOBLACK ... */
    } else if (s->bit_depth == 8 &&
               s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_YA8;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_YA16BE;
    } else {
        avpriv_report_missing_feature(avctx,
                                      "Bit depth %d color type %d",
                                      s->bit_depth, s->color_type);
        return AVERROR_PATCHWELCOME;
    }

    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        /* switch to the matching alpha format, or report
         * "Bit depth %d and color type %d with TRNS" as a missing feature */
        /* ... */
        s->bpp += byte_depth;
    }

    /* ... allocate the frame and the temporary row buffers ... */

    if (!s->interlace_type) {
        s->crow_size = s->row_size + 1;
    } else {
        /* ... start with pass 0 and its pass_row_size ... */
        s->crow_size = s->pass_row_size + 1;
    }
    ff_dlog(avctx, "row_size=%d crow_size =%d\n",
            s->row_size, s->crow_size);

    /* copy the palette into the second plane of PAL8 frames */
    memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));

    if (s->interlace_type ||
        s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        /* ... these paths also need the tmp_row buffer ... */
    }
    /* crow_buf + 1 is kept 16-byte aligned for the DSP routines */
    s->crow_buf                  = s->buffer + 15;
    s->zstream.zstream.avail_out = s->crow_size;
    s->zstream.zstream.next_out  = s->crow_buf;

    /* while inflating IDAT, the extra tRNS-generated alpha byte is not part
     * of the stored row, so it is temporarily removed from bpp: */
    s->bpp -= byte_depth;
    /* ... png_decode_idat(s, gb, p->data[0], p->linesize[0]) ... */
    s->bpp += byte_depth;
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int n, i, r, g, b;

    if ((length % 3) != 0 || length > 256 * 3)
        return AVERROR_INVALIDDATA;
    /* read the palette */
    n = length / 3;
    for (i = 0; i < n; i++) {
        r = bytestream2_get_byte(gb);
        g = bytestream2_get_byte(gb);
        b = bytestream2_get_byte(gb);
        s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
    }
    for (; i < 256; i++)
        s->palette[i] = (0xFFU << 24);
    s->hdr_state |= PNG_PLTE;

    return 0;
}
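/*
 * Sketch of the palette packing used above: each PLTE triplet becomes an
 * opaque 0xAARRGGBB word, and the remaining entries are filled with opaque
 * black.  Standalone and illustrative only.
 */
#include <stdint.h>

static void fill_palette(uint32_t pal[256], const uint8_t *plte, int n)
{
    int i;
    for (i = 0; i < n; i++)
        pal[i] = 0xFFu << 24 | plte[3 * i] << 16 |
                 plte[3 * i + 1] << 8 | plte[3 * i + 2];
    for (; i < 256; i++)
        pal[i] = 0xFFu << 24;   /* opaque black */
}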
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int i;

    /* ... basic state checks ... */
    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        if (length > 256 || !(s->hdr_state & PNG_PLTE))
            return AVERROR_INVALIDDATA;
        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY ||
               s->color_type == PNG_COLOR_TYPE_RGB) {
        /* ... length sanity check ... */
        for (i = 0; i < length / 2; i++) {
            unsigned v = bytestream2_get_be16(gb);
            /* ... */
            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    s->has_trns = 1;
    return 0;
}
static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb)
{
    int ret, cnt = 0;
    AVBPrint bp;

    /* NUL-terminated profile name, at most 80 characters */
    while ((s->iccp_name[cnt++] = bytestream2_get_byte(gb)) && cnt < 81);
    /* ... reject an over-long name ... */
    /* only compression method 0 (deflate) is defined */
    if (bytestream2_get_byte(gb) != 0) {
        /* ... fail ... */
    }
    /* ... inflate the profile into bp with decode_zbuf(), take ownership: */
    s->iccp_data_len = bp.len;
    /* ... */
    return 0;
fail:
    s->iccp_name[0] = 0;
    /* ... */
    return ret;
}
static int decode_sbit_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    int bits = 0;

    /* ... one sBIT byte per channel; each value must fit in bit_depth: */
    int b = bytestream2_get_byteu(gb);
    /* ... */
    s->significant_bits = bits;
    return 0;
}
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
{
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
        int i, j, k;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 8;
            for (k = 7; k >= 1; k--)
                if ((s->width & 7) >= k)
                    pd[8*i + k - 1] = (pd[i] >> (8 - k)) & 1;
            for (i--; i >= 0; i--) {
                pd[8*i + 7] =  pd[i]       & 1;
                pd[8*i + 6] = (pd[i] >> 1) & 1;
                pd[8*i + 5] = (pd[i] >> 2) & 1;
                pd[8*i + 4] = (pd[i] >> 3) & 1;
                pd[8*i + 3] = (pd[i] >> 4) & 1;
                pd[8*i + 2] = (pd[i] >> 5) & 1;
                pd[8*i + 1] = (pd[i] >> 6) & 1;
                pd[8*i + 0] =  pd[i] >> 7;
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 2) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if ((s->width & 3) >= 3) pd[4*i + 2] = (pd[i] >> 2) & 3;
                if ((s->width & 3) >= 2) pd[4*i + 1] = (pd[i] >> 4) & 3;
                if ((s->width & 3) >= 1) pd[4*i + 0] =  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3] =  pd[i]       & 3;
                    pd[4*i + 2] = (pd[i] >> 2) & 3;
                    pd[4*i + 1] = (pd[i] >> 4) & 3;
                    pd[4*i + 0] =  pd[i] >> 6;
                }
            } else {
                /* grayscale: scale the 2-bit samples to the full 8-bit range */
                if ((s->width & 3) >= 3) pd[4*i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                if ((s->width & 3) >= 2) pd[4*i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                if ((s->width & 3) >= 1) pd[4*i + 0] = ( pd[i] >> 6     ) * 0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3] = ( pd[i]       & 3) * 0x55;
                    pd[4*i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                    pd[4*i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                    pd[4*i + 0] = ( pd[i] >> 6     ) * 0x55;
                }
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 4) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if (s->width & 1) pd[2*i + 0] = pd[i] >> 4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                }
            } else {
                /* grayscale: scale the 4-bit samples to the full 8-bit range */
                if (s->width & 1) pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += p->linesize[0];
        }
    }
}
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    /* ... chunk length / state validation ... */
    sequence_number = bytestream2_get_be32(gb);
    cur_w           = bytestream2_get_be32(gb);
    cur_h           = bytestream2_get_be32(gb);
    x_offset        = bytestream2_get_be32(gb);
    y_offset        = bytestream2_get_be32(gb);
    /* ... delay_num / delay_den ... */
    dispose_op      = bytestream2_get_byte(gb);
    blend_op        = bytestream2_get_byte(gb);

    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
         x_offset != 0 ||
         y_offset != 0) ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset || cur_h > s->height - y_offset)
        return AVERROR_INVALIDDATA;

    if ((sequence_number == 0 || !s->last_picture.f) &&
        dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        /* the first frame, or a frame without a reference, cannot restore
         * "previous"; fall back to APNG_DISPOSE_OP_BACKGROUND */
        dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    s->cur_w      = cur_w;
    s->cur_h      = cur_h;
    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;

    return 0;
}
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
{
    int i, j;
    uint8_t *pd      = p->data[0];
    uint8_t *pd_last = s->last_picture.f->data[0];
    int ls = av_image_get_linesize(p->format, s->width, 0);

    ls = FFMIN(ls, s->width * s->bpp);

    /* ... wait for the reference frame, then add the P-frame deltas ... */
    for (j = 0; j < s->height; j++) {
        for (i = 0; i < ls; i++)
            pd[i] += pd_last[i];
        pd      += p->linesize[0];
        pd_last += s->last_picture.f->linesize[0];
    }
}
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
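/*
 * Why the FAST_DIV255() macro above works: for 0 <= x <= 255*255,
 * ((x + 128) * 257) >> 16 equals x/255 rounded to nearest, because
 * 255 * 257 = 65535 (so the scaled multiply undershoots x/255 by less than
 * one unit in the last place) and the +128 bias turns truncation into
 * rounding.  A small self-check, illustrative only:
 */
static int fast_div255_matches_rounding(void)
{
    for (int x = 0; x <= 255 * 255; x++)
        if ((((x + 128) * 257) >> 16) != (x + 127) / 255)
            return 0;
    return 1;   /* 1: the identity holds on the whole input range */
}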
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
{
    uint8_t       *dst        = p->data[0];
    ptrdiff_t      dst_stride = p->linesize[0];
    const uint8_t *src        = s->last_picture.f->data[0];
    ptrdiff_t      src_stride = s->last_picture.f->linesize[0];
    int bpp;   /* bytes per pixel of the output format (initialization elided) */
    int x, y;

    /* ... wait for the previous frame and apply its dispose_op ... */

    /* copy the rectangles this frame does not touch from the previous frame */
    for (y = 0; y < s->y_offset; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);
    for (y = s->y_offset; y < s->y_offset + s->cur_h; y++) {
        memcpy(dst + y * dst_stride, src + y * src_stride, s->x_offset * bpp);
        memcpy(dst + y * dst_stride + (s->x_offset + s->cur_w) * bpp,
               src + y * src_stride + (s->x_offset + s->cur_w) * bpp,
               (p->width - s->cur_w - s->x_offset) * bpp);
    }
    for (y = s->y_offset + s->cur_h; y < p->height; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);

    /* ... for APNG_BLEND_OP_OVER, blend the new region over the old one: */
    for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
        uint8_t       *foreground = dst + dst_stride * y + bpp * s->x_offset;
        const uint8_t *background = src + src_stride * y + bpp * s->x_offset;
        for (x = s->x_offset; x < s->x_offset + s->cur_w;
             ++x, foreground += bpp, background += bpp) {
            size_t b;
            uint8_t foreground_alpha, background_alpha, output_alpha;
            uint8_t output[10];   /* large enough for the widest supported format */

            switch (avctx->pix_fmt) {
            case AV_PIX_FMT_RGBA:
                foreground_alpha = foreground[3];
                background_alpha = background[3];
                break;
            case AV_PIX_FMT_GRAY8A:
                foreground_alpha = foreground[1];
                background_alpha = background[1];
                break;
            }

            if (foreground_alpha == 255)
                continue;
            if (foreground_alpha == 0) {
                memcpy(foreground, background, bpp);
                continue;
            }

            output_alpha = foreground_alpha +
                           FAST_DIV255((255 - foreground_alpha) * background_alpha);

            for (b = 0; b < bpp - 1; ++b) {
                if (output_alpha == 0) {
                    output[b] = 0;
                } else if (background_alpha == 255) {
                    output[b] = FAST_DIV255(foreground_alpha * foreground[b] +
                                            (255 - foreground_alpha) * background[b]);
                } else {
                    output[b] = (255 * foreground_alpha * foreground[b] +
                                 (255 - foreground_alpha) * background_alpha * background[b]) /
                                (255 * output_alpha);
                }
            }
            output[b] = output_alpha;
            memcpy(foreground, output, bpp);
        }
    }

    return 0;
}
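/*
 * The blend above is APNG's "over" operator on straight (non-premultiplied)
 * alpha.  A compact restatement for a single channel, using plain integer
 * division instead of FAST_DIV255; illustrative helper only.
 */
static unsigned blend_over(unsigned fg, unsigned fa,   /* foreground value, alpha */
                           unsigned bg, unsigned ba)   /* background value, alpha */
{
    unsigned out_a = fa + (255 - fa) * ba / 255;
    if (out_a == 0)
        return 0;
    return (255 * fa * fg + (255 - fa) * ba * bg) / (255 * out_a);
}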
static void apng_reset_background(PNGDecContext *s, const AVFrame *p)
{
    /* APNG_DISPOSE_OP_BACKGROUND: clear the frame's region to transparent black */
    int bpp;   /* bytes per pixel of the output format (initialization elided) */
    const ptrdiff_t dst_stride = s->picture.f->linesize[0];
    uint8_t *dst = s->picture.f->data[0] +
                   s->y_offset * dst_stride + bpp * s->x_offset;

    for (size_t y = 0; y < s->cur_h; y++) {
        memset(dst, 0, bpp * s->cur_w);
        dst += dst_stride;
    }
}
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p, const AVPacket *avpkt)
{
    const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t tag, length;
    int decode_next_dat = 0;
    int i, ret;

    for (;;) {
        GetByteContext gb_chunk;

        /* ... stop when fewer than 8 bytes (length + type) remain ... */
        length = bytestream2_get_be32(&s->gb);
        /* ... validate length against the remaining input ... */

        /* when CRC checking is requested via err_recognition: */
        {
            uint32_t crc_sig = AV_RB32(s->gb.buffer + length + 4);
            uint32_t crc_cal = ~av_crc(crc_tab, UINT32_MAX, s->gb.buffer, length + 4);
            if (crc_sig ^ crc_cal) {
                /* ... mismatch: warn and skip, or fail with AV_EF_EXPLODE ... */
            }
        }

        tag = bytestream2_get_le32(&s->gb);
        if (avctx->debug & FF_DEBUG_STARTCODE)
            av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
                   av_fourcc2str(tag), length);

        bytestream2_init(&gb_chunk, s->gb.buffer, length);
        bytestream2_skip(&s->gb, length + 4);

        /* while the frame is being skipped, only parse the chunks that
         * carry global state (header, palette, color metadata) */
        if (avctx->codec_id == AV_CODEC_ID_PNG &&
            avctx->skip_frame == AVDISCARD_ALL) {
            switch (tag) {
            case MKTAG('I', 'H', 'D', 'R'):
            case MKTAG('p', 'H', 'Y', 's'):
            case MKTAG('t', 'E', 'X', 't'):
            case MKTAG('I', 'D', 'A', 'T'):
            case MKTAG('t', 'R', 'N', 'S'):
            case MKTAG('s', 'R', 'G', 'B'):
            case MKTAG('c', 'I', 'C', 'P'):
            case MKTAG('c', 'H', 'R', 'M'):
            case MKTAG('g', 'A', 'M', 'A'):
                break;
            default:
                continue;
            }
        }
        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            /* ... decode_ihdr_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('p', 'H', 'Y', 's'):
            /* ... decode_phys_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            /* ... APNG only: decode_fctl_chunk(avctx, s, &gb_chunk) ... */
            decode_next_dat = 1;
            break;
        case MKTAG('f', 'd', 'A', 'T'):
            /* ... APNG only; requires a preceding fcTL ... */
            bytestream2_get_be32(&gb_chunk);   /* discard the fdAT sequence number */
            /* fallthrough */
        case MKTAG('I', 'D', 'A', 'T'):
            /* ... decode_idat_chunk(avctx, s, &gb_chunk, p) ... */
            break;
        case MKTAG('P', 'L', 'T', 'E'):
            /* ... decode_plte_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('t', 'R', 'N', 'S'):
            /* ... decode_trns_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('t', 'E', 'X', 't'):
            /* ... decode_text_chunk(s, &gb_chunk, 0) ... */
            break;
        case MKTAG('z', 'T', 'X', 't'):
            /* ... decode_text_chunk(s, &gb_chunk, 1) ... */
            break;
        case MKTAG('s', 'T', 'E', 'R'): {
            int mode = bytestream2_get_byte(&gb_chunk);
            if (mode == 0 || mode == 1)
                s->stereo_mode = mode;
            else
                av_log(avctx, AV_LOG_WARNING,
                       "Unknown value in sTER chunk (%d)\n", mode);
            break;
        }
        case MKTAG('c', 'I', 'C', 'P'):
            s->cicp_primaries = bytestream2_get_byte(&gb_chunk);
            s->cicp_trc       = bytestream2_get_byte(&gb_chunk);
            if (bytestream2_get_byte(&gb_chunk) != 0)
                /* ... only the RGB matrix (0) is valid in PNG ... */;
            s->cicp_range = bytestream2_get_byte(&gb_chunk);
            if (s->cicp_range != 0 && s->cicp_range != 1)
                /* ... invalid range value ... */;
            /* ... remember that a cICP chunk was seen ... */
            break;
        case MKTAG('s', 'R', 'G', 'B'):
            /* ... remember that an sRGB chunk was seen ... */
            break;
        case MKTAG('i', 'C', 'C', 'P'): {
            /* ... decode_iccp_chunk(s, &gb_chunk) ... */
            break;
        }
        case MKTAG('c', 'H', 'R', 'M'): {
            /* ... remember that a cHRM chunk was seen ... */
            s->white_point[0] = bytestream2_get_be32(&gb_chunk);
            s->white_point[1] = bytestream2_get_be32(&gb_chunk);
            /* R, G and B primaries, x then y for each */
            for (i = 0; i < 3; i++) {
                s->display_primaries[i][0] = bytestream2_get_be32(&gb_chunk);
                s->display_primaries[i][1] = bytestream2_get_be32(&gb_chunk);
            }
            break;
        }
        case MKTAG('s', 'B', 'I', 'T'):
            /* ... decode_sbit_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('g', 'A', 'M', 'A'): {
            s->gamma = bytestream2_get_be32(&gb_chunk);
            break;
        }
        case MKTAG('c', 'L', 'L', 'i'):
        case MKTAG('c', 'L', 'L', 'I'):
            /* ... length check ... */
            s->clli_max = bytestream2_get_be32u(&gb_chunk);
            s->clli_avg = bytestream2_get_be32u(&gb_chunk);
            break;
        case MKTAG('m', 'D', 'C', 'v'):
        case MKTAG('m', 'D', 'C', 'V'):
            /* ... length check ... */
            for (int i = 0; i < 3; i++) {
                s->mdcv_primaries[i][0] = bytestream2_get_be16u(&gb_chunk);
                s->mdcv_primaries[i][1] = bytestream2_get_be16u(&gb_chunk);
            }
            s->mdcv_white_point[0] = bytestream2_get_be16u(&gb_chunk);
            s->mdcv_white_point[1] = bytestream2_get_be16u(&gb_chunk);
            s->mdcv_max_lum = bytestream2_get_be32u(&gb_chunk);
            s->mdcv_min_lum = bytestream2_get_be32u(&gb_chunk);
            break;
        case MKTAG('e', 'X', 'I', 'f'):
            /* ... decode_exif_chunk(avctx, s, &gb_chunk) ... */
            break;
        case MKTAG('I', 'E', 'N', 'D'):
            /* ... the image must be complete here; leave the chunk loop ... */
    /* --- after the chunk loop: finish and post-process the frame --- */

    if (s->bits_per_pixel <= 4)
        handle_small_bpp(s, p);

    /* expand palette indices to RGBA when tRNS forced an RGBA output */
    if (s->color_type == PNG_COLOR_TYPE_PALETTE && avctx->pix_fmt == AV_PIX_FMT_RGBA) {
        for (int y = 0; y < s->height; y++) {
            uint8_t *row = &p->data[0][p->linesize[0] * y];

            for (int x = s->width - 1; x >= 0; x--) {
                const uint8_t idx = row[x];

                row[4*x+2] =  s->palette[idx]        & 0xFF;
                row[4*x+1] = (s->palette[idx] >> 8 ) & 0xFF;
                row[4*x+0] = (s->palette[idx] >> 16) & 0xFF;
                row[4*x+3] =  s->palette[idx] >> 24;
            }
        }
    }

    /* expand the tRNS transparent color into a real alpha channel */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
        size_t raw_bpp = s->bpp - byte_depth;
        unsigned x, y;

        for (y = 0; y < s->height; ++y) {
            uint8_t *row = &p->data[0][p->linesize[0] * y];

            if (s->bpp == 2 && byte_depth == 1) {
                uint8_t *pixel = &row[2 * s->width - 1];
                uint8_t *rowp  = &row[1 * s->width - 1];
                int tcolor = s->transparent_color_be[0];
                for (x = s->width; x > 0; --x) {
                    *pixel-- = *rowp == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                }
            } else if (s->bpp == 4 && byte_depth == 1) {
                uint8_t *pixel = &row[4 * s->width - 1];
                uint8_t *rowp  = &row[3 * s->width - 1];
                int tcolor = AV_RL24(s->transparent_color_be);
                for (x = s->width; x > 0; --x) {
                    /* ... compare the 3-byte pixel against tcolor, emit the
                     *     alpha byte, then copy the color bytes ... */
                }
            } else {
                for (x = s->width; x > 0; --x) {
                    uint8_t *pixel = &row[s->bpp * (x - 1)];
                    memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
                    if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
                        memset(&pixel[raw_bpp], 0, byte_depth);
                    } else {
                        memset(&pixel[raw_bpp], 0xff, byte_depth);
                    }
                }
            }
        }
    }

    /* handle P frames: add the previous picture back in */
    if (s->last_picture.f) {
        if (/* ... reference usable and geometry matches ... */
               s->last_picture.f->width  == p->width
            && s->last_picture.f->height == p->height
            && s->last_picture.f->format == p->format) {
            if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
                handle_p_frame_png(s, p);
            else if (CONFIG_APNG_DECODER &&
                     avctx->codec_id == AV_CODEC_ID_APNG) {
                /* ... handle_p_frame_apng(avctx, s, p) ... */
            }
        }
    }
    /* ... */
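/*
 * Side note on the MKTAG() dispatch used in the chunk switch above:
 * MKTAG('I','D','A','T') packs the four characters with the first byte in
 * the lowest bits, which is why the chunk type is read with a little-endian
 * fetch (bytestream2_get_le32) before being compared.  Illustrative
 * restatement, not the decoder's API:
 */
#include <stdint.h>

#define MKTAG_SKETCH(a, b, c, d) \
    ((uint32_t)(a) | (uint32_t)(b) << 8 | (uint32_t)(c) << 16 | (uint32_t)(d) << 24)

static uint32_t read_tag_le(const uint8_t *p)
{
    /* bytes appear in file order: p[0]='I', p[1]='D', ... for an IDAT chunk */
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}
/* read_tag_le((const uint8_t *)"IDAT") == MKTAG_SKETCH('I','D','A','T') */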
1830 s->iccp_data_len = 0;
1831 s->iccp_name[0] = 0;
1833 s->stereo_mode = -1;
static int output_frame(PNGDecContext *s, AVFrame *f)
{
    /* ... attach the accumulated metadata to the outgoing frame ... */
    if (s->stereo_mode >= 0) {
        AVStereo3D *stereo3d = av_stereo3d_create_side_data(f);
        /* ... AV_STEREO3D_SIDEBYSIDE, with AV_STEREO3D_FLAG_INVERT for mode 0 ... */
    }
    /* ... */
    return 0;
}

#if CONFIG_PNG_DECODER
static int decode_frame_png(AVCodecContext *avctx, AVFrame *p,
                            int *got_frame, AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int64_t sig;
    int ret;

    clear_frame_metadata(s);

    bytestream2_init(&s->gb, buf, buf_size);

    /* check the signature */
    sig = bytestream2_get_be64(&s->gb);
    /* ... reject anything that is neither a PNG nor a MNG signature ... */

    s->y = s->has_trns = 0;

    /* init the zlib */
    ret = inflateReset(&s->zstream.zstream);
    if (ret != Z_OK)
        return AVERROR_EXTERNAL;
    /* ... decode_frame_common(), then output_frame() ... */
}
#endif

#if CONFIG_APNG_DECODER
static int decode_frame_apng(AVCodecContext *avctx, AVFrame *p,
                             int *got_frame, AVPacket *avpkt)
{
    /* ... on the first frame the IHDR/PLTE chunks come from extradata: */
    if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
        return AVERROR_EXTERNAL;
    /* ... and the inflater is reset again before the frame's own data: */
    if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
        return AVERROR_EXTERNAL;
    /* ... decode_frame_common(), then output_frame() ... */
}
#endif

static av_cold int png_dec_end(AVCodecContext *avctx)
{
    PNGDecContext *s = avctx->priv_data;

    /* ... free the row buffers ... */
    s->last_row_size = 0;
    /* ... */
    s->tmp_row_size  = 0;
    /* ... ff_inflate_end(&s->zstream) ... */
    return 0;
}

#if CONFIG_APNG_DECODER
const FFCodec ff_apng_decoder = {
    /* ... */
};
#endif

#if CONFIG_PNG_DECODER
const FFCodec ff_png_decoder = {
    /* ... */
};
#endif