FFmpeg
pngdec.c
1 /*
2  * PNG image format
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 //#define DEBUG
23 
24 #include "libavutil/avassert.h"
25 #include "libavutil/bprint.h"
26 #include "libavutil/imgutils.h"
27 #include "libavutil/stereo3d.h"
28 #include "libavutil/mastering_display_metadata.h"
29 
30 #include "avcodec.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 #include "apng.h"
34 #include "png.h"
35 #include "pngdsp.h"
36 #include "thread.h"
37 
38 #include <zlib.h>
39 
40 enum PNGHeaderState {
41  PNG_IHDR = 1 << 0,
42  PNG_PLTE = 1 << 1,
43 };
44 
45 enum PNGImageState {
46  PNG_IDAT = 1 << 0,
47  PNG_ALLIMAGE = 1 << 1,
48 };
49 
50 typedef struct PNGDecContext {
51  PNGDSPContext dsp;
52  AVCodecContext *avctx;
53 
54  GetByteContext gb;
55  ThreadFrame previous_picture;
56  ThreadFrame last_picture;
57  ThreadFrame picture;
58 
59  enum PNGHeaderState hdr_state;
60  enum PNGImageState pic_state;
61  int width, height;
62  int cur_w, cur_h;
63  int last_w, last_h;
64  int x_offset, y_offset;
65  int last_x_offset, last_y_offset;
66  uint8_t dispose_op, blend_op;
67  uint8_t last_dispose_op;
68  int bit_depth;
69  int color_type;
70  int compression_type;
71  int interlace_type;
72  int filter_type;
73  int channels;
74  int bits_per_pixel;
75  int bpp;
76  int has_trns;
77  uint8_t transparent_color_be[6];
78 
79  uint8_t *image_buf;
80  int image_linesize;
81  uint32_t palette[256];
82  uint8_t *crow_buf;
83  uint8_t *last_row;
84  unsigned int last_row_size;
85  uint8_t *tmp_row;
86  unsigned int tmp_row_size;
87  uint8_t *buffer;
88  int buffer_size;
89  int pass;
90  int crow_size; /* compressed row size (includes filter type) */
91  int row_size; /* decompressed row size */
92  int pass_row_size; /* decompressed row size of the current pass */
93  int y;
94  z_stream zstream;
95 } PNGDecContext;
96 
97 /* Mask to determine which pixels are valid in a pass */
98 static const uint8_t png_pass_mask[NB_PASSES] = {
99  0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
100 };
101 
102 /* Mask to determine which y pixels can be written in a pass */
103 static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
104  0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
105 };
106 
107 /* Mask to determine which pixels to overwrite while displaying */
108 static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
109  0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
110 };
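/*
 * Worked example (illustrative, derived from the loop in png_put_interlaced_row
 * below): every test has the form (mask << (x & 7)) & 0x80, i.e. it checks bit
 * (7 - x%8) of the mask. png_pass_dsp_mask selects which of the 8 columns of a
 * block get painted for the progressive preview, while png_pass_mask marks the
 * column after which the source pointer advances to the next decoded sample.
 * For pass 1, dsp_mask = 0x0f paints columns 4..7 and mask = 0x01 advances the
 * source only after column 7, so the single pass-1 sample (which sits at
 * x%8 == 4 in the final image) is replicated across columns 4..7 of the preview.
 */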
111 
112 /* NOTE: we try to construct a good looking image at each pass. width
113  * is the original image width. We also do pixel format conversion at
114  * this stage */
115 static void png_put_interlaced_row(uint8_t *dst, int width,
116  int bits_per_pixel, int pass,
117  int color_type, const uint8_t *src)
118 {
119  int x, mask, dsp_mask, j, src_x, b, bpp;
120  uint8_t *d;
121  const uint8_t *s;
122 
123  mask = png_pass_mask[pass];
124  dsp_mask = png_pass_dsp_mask[pass];
125 
126  switch (bits_per_pixel) {
127  case 1:
128  src_x = 0;
129  for (x = 0; x < width; x++) {
130  j = (x & 7);
131  if ((dsp_mask << j) & 0x80) {
132  b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
133  dst[x >> 3] &= 0xFF7F>>j;
134  dst[x >> 3] |= b << (7 - j);
135  }
136  if ((mask << j) & 0x80)
137  src_x++;
138  }
139  break;
140  case 2:
141  src_x = 0;
142  for (x = 0; x < width; x++) {
143  int j2 = 2 * (x & 3);
144  j = (x & 7);
145  if ((dsp_mask << j) & 0x80) {
146  b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
147  dst[x >> 2] &= 0xFF3F>>j2;
148  dst[x >> 2] |= b << (6 - j2);
149  }
150  if ((mask << j) & 0x80)
151  src_x++;
152  }
153  break;
154  case 4:
155  src_x = 0;
156  for (x = 0; x < width; x++) {
157  int j2 = 4*(x&1);
158  j = (x & 7);
159  if ((dsp_mask << j) & 0x80) {
160  b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
161  dst[x >> 1] &= 0xFF0F>>j2;
162  dst[x >> 1] |= b << (4 - j2);
163  }
164  if ((mask << j) & 0x80)
165  src_x++;
166  }
167  break;
168  default:
169  bpp = bits_per_pixel >> 3;
170  d = dst;
171  s = src;
172  for (x = 0; x < width; x++) {
173  j = x & 7;
174  if ((dsp_mask << j) & 0x80) {
175  memcpy(d, s, bpp);
176  }
177  d += bpp;
178  if ((mask << j) & 0x80)
179  s += bpp;
180  }
181  break;
182  }
183 }
184 
185 void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
186  int w, int bpp)
187 {
188  int i;
189  for (i = 0; i < w; i++) {
190  int a, b, c, p, pa, pb, pc;
191 
192  a = dst[i - bpp];
193  b = top[i];
194  c = top[i - bpp];
195 
196  p = b - c;
197  pc = a - c;
198 
199  pa = abs(p);
200  pb = abs(pc);
201  pc = abs(p + pc);
202 
203  if (pa <= pb && pa <= pc)
204  p = a;
205  else if (pb <= pc)
206  p = b;
207  else
208  p = c;
209  dst[i] = p + src[i];
210  }
211 }
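/*
 * Worked example (illustrative): with left a = 100, above b = 50 and upper-left
 * c = 40, the code above computes pa = |b - c| = 10, pb = |a - c| = 60 and
 * pc = |a + b - 2c| = 70. pa is smallest, so the predictor is the left pixel
 * and dst[i] = 100 + src[i] (modulo 256), matching the Paeth filter defined by
 * the PNG specification.
 */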
212 
213 #define UNROLL1(bpp, op) \
214  { \
215  r = dst[0]; \
216  if (bpp >= 2) \
217  g = dst[1]; \
218  if (bpp >= 3) \
219  b = dst[2]; \
220  if (bpp >= 4) \
221  a = dst[3]; \
222  for (; i <= size - bpp; i += bpp) { \
223  dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
224  if (bpp == 1) \
225  continue; \
226  dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
227  if (bpp == 2) \
228  continue; \
229  dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
230  if (bpp == 3) \
231  continue; \
232  dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
233  } \
234  }
235 
236 #define UNROLL_FILTER(op) \
237  if (bpp == 1) { \
238  UNROLL1(1, op) \
239  } else if (bpp == 2) { \
240  UNROLL1(2, op) \
241  } else if (bpp == 3) { \
242  UNROLL1(3, op) \
243  } else if (bpp == 4) { \
244  UNROLL1(4, op) \
245  } \
246  for (; i < size; i++) { \
247  dst[i] = op(dst[i - bpp], src[i], last[i]); \
248  }
249 
250 /* NOTE: 'dst' can be equal to 'last' */
251 static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
252  uint8_t *src, uint8_t *last, int size, int bpp)
253 {
254  int i, p, r, g, b, a;
255 
256  switch (filter_type) {
257  case PNG_FILTER_VALUE_NONE:
258  memcpy(dst, src, size);
259  break;
260  case PNG_FILTER_VALUE_SUB:
261  for (i = 0; i < bpp; i++)
262  dst[i] = src[i];
263  if (bpp == 4) {
264  p = *(int *)dst;
265  for (; i < size; i += bpp) {
266  unsigned s = *(int *)(src + i);
267  p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
268  *(int *)(dst + i) = p;
269  }
270  } else {
271 #define OP_SUB(x, s, l) ((x) + (s))
272  UNROLL_FILTER(OP_SUB);
273  }
274  break;
275  case PNG_FILTER_VALUE_UP:
276  dsp->add_bytes_l2(dst, src, last, size);
277  break;
278  case PNG_FILTER_VALUE_AVG:
279  for (i = 0; i < bpp; i++) {
280  p = (last[i] >> 1);
281  dst[i] = p + src[i];
282  }
283 #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
284  UNROLL_FILTER(OP_AVG);
285  break;
286  case PNG_FILTER_VALUE_PAETH:
287  for (i = 0; i < bpp; i++) {
288  p = last[i];
289  dst[i] = p + src[i];
290  }
291  if (bpp > 2 && size > 4) {
292  /* would write off the end of the array if we let it process
293  * the last pixel with bpp=3 */
294  int w = (bpp & 3) ? size - 3 : size;
295 
296  if (w > i) {
297  dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
298  i = w;
299  }
300  }
301  ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
302  break;
303  }
304 }
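/*
 * Worked example (illustrative) for filter type SUB with bpp = 3: the first
 * three output bytes are copied verbatim, then each following byte adds the
 * reconstructed byte one pixel (bpp bytes) to the left. For the filtered row
 * 10 20 30 5 6 7 the reconstructed row is 10 20 30 15 26 37, all arithmetic
 * modulo 256 because the buffers are uint8_t. The word-at-a-time path above
 * produces the same sums for bpp = 4 by adding four bytes per 32-bit word
 * while masking off the inter-byte carries.
 */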
305 
306 /* This used to be called "deloco" in FFmpeg
307  * and is actually an inverse reversible colorspace transformation */
308 #define YUV2RGB(NAME, TYPE) \
309 static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
310 { \
311  int i; \
312  for (i = 0; i < size; i += 3 + alpha) { \
313  int g = dst [i + 1]; \
314  dst[i + 0] += g; \
315  dst[i + 2] += g; \
316  } \
317 }
318 
319 YUV2RGB(rgb8, uint8_t)
320 YUV2RGB(rgb16, uint16_t)
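/*
 * Worked example (illustrative): with PNG_FILTER_TYPE_LOCO the stored channels
 * are (R - G, G, B - G), modulo the sample range. A stored triple (30, 50, 10)
 * therefore decodes to R = 80, G = 50, B = 60 once deloco_rgb8() adds the green
 * sample back to channels 0 and 2; for RGBA the alpha byte is left untouched
 * via the '3 + alpha' loop stride.
 */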
321 
322 /* process exactly one decompressed row */
323 static void png_handle_row(PNGDecContext *s)
324 {
325  uint8_t *ptr, *last_row;
326  int got_line;
327 
328  if (!s->interlace_type) {
329  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
330  if (s->y == 0)
331  last_row = s->last_row;
332  else
333  last_row = ptr - s->image_linesize;
334 
335  png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
336  last_row, s->row_size, s->bpp);
337  /* loco lags by 1 row so that it doesn't interfere with top prediction */
338  if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
339  if (s->bit_depth == 16) {
340  deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
341  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
342  } else {
343  deloco_rgb8(ptr - s->image_linesize, s->row_size,
344  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
345  }
346  }
347  s->y++;
348  if (s->y == s->cur_h) {
349  s->pic_state |= PNG_ALLIMAGE;
350  if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
351  if (s->bit_depth == 16) {
352  deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
353  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
354  } else {
355  deloco_rgb8(ptr, s->row_size,
356  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
357  }
358  }
359  }
360  } else {
361  got_line = 0;
362  for (;;) {
363  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
364  if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
365  /* if we already read one row, it is time to stop to
366  * wait for the next one */
367  if (got_line)
368  break;
369  png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
370  s->last_row, s->pass_row_size, s->bpp);
371  FFSWAP(uint8_t *, s->last_row, s->tmp_row);
372  FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
373  got_line = 1;
374  }
375  if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
376  png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
377  s->color_type, s->last_row);
378  }
379  s->y++;
380  if (s->y == s->cur_h) {
381  memset(s->last_row, 0, s->row_size);
382  for (;;) {
383  if (s->pass == NB_PASSES - 1) {
384  s->pic_state |= PNG_ALLIMAGE;
385  goto the_end;
386  } else {
387  s->pass++;
388  s->y = 0;
389  s->pass_row_size = ff_png_pass_row_size(s->pass,
390  s->bits_per_pixel,
391  s->cur_w);
392  s->crow_size = s->pass_row_size + 1;
393  if (s->pass_row_size != 0)
394  break;
395  /* skip pass if empty row */
396  }
397  }
398  }
399  }
400 the_end:;
401  }
402 }
403 
404 static int png_decode_idat(PNGDecContext *s, int length)
405 {
406  int ret;
407  s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
408  s->zstream.next_in = (unsigned char *)s->gb.buffer;
409  bytestream2_skip(&s->gb, length);
410 
411  /* decode one line if possible */
412  while (s->zstream.avail_in > 0) {
413  ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
414  if (ret != Z_OK && ret != Z_STREAM_END) {
415  av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
416  return AVERROR_EXTERNAL;
417  }
418  if (s->zstream.avail_out == 0) {
419  if (!(s->pic_state & PNG_ALLIMAGE)) {
420  png_handle_row(s);
421  }
422  s->zstream.avail_out = s->crow_size;
423  s->zstream.next_out = s->crow_buf;
424  }
425  if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
426  av_log(NULL, AV_LOG_WARNING,
427  "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
428  return 0;
429  }
430  }
431  return 0;
432 }
433 
434 static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
435  const uint8_t *data_end)
436 {
437  z_stream zstream;
438  unsigned char *buf;
439  unsigned buf_size;
440  int ret;
441 
442  zstream.zalloc = ff_png_zalloc;
443  zstream.zfree = ff_png_zfree;
444  zstream.opaque = NULL;
445  if (inflateInit(&zstream) != Z_OK)
446  return AVERROR_EXTERNAL;
447  zstream.next_in = (unsigned char *)data;
448  zstream.avail_in = data_end - data;
449  av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);
450 
451  while (zstream.avail_in > 0) {
452  av_bprint_get_buffer(bp, 2, &buf, &buf_size);
453  if (buf_size < 2) {
454  ret = AVERROR(ENOMEM);
455  goto fail;
456  }
457  zstream.next_out = buf;
458  zstream.avail_out = buf_size - 1;
459  ret = inflate(&zstream, Z_PARTIAL_FLUSH);
460  if (ret != Z_OK && ret != Z_STREAM_END) {
461  ret = AVERROR_EXTERNAL;
462  goto fail;
463  }
464  bp->len += zstream.next_out - buf;
465  if (ret == Z_STREAM_END)
466  break;
467  }
468  inflateEnd(&zstream);
469  bp->str[bp->len] = 0;
470  return 0;
471 
472 fail:
473  inflateEnd(&zstream);
474  av_bprint_finalize(bp, NULL);
475  return ret;
476 }
477 
478 static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
479 {
480  size_t extra = 0, i;
481  uint8_t *out, *q;
482 
483  for (i = 0; i < size_in; i++)
484  extra += in[i] >= 0x80;
485  if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
486  return NULL;
487  q = out = av_malloc(size_in + extra + 1);
488  if (!out)
489  return NULL;
490  for (i = 0; i < size_in; i++) {
491  if (in[i] >= 0x80) {
492  *(q++) = 0xC0 | (in[i] >> 6);
493  *(q++) = 0x80 | (in[i] & 0x3F);
494  } else {
495  *(q++) = in[i];
496  }
497  }
498  *(q++) = 0;
499  return out;
500 }
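/*
 * Worked example (illustrative): the Latin-1 byte 0xE9 ('é') is >= 0x80, so it
 * expands to the two UTF-8 bytes 0xC0 | (0xE9 >> 6) = 0xC3 and
 * 0x80 | (0xE9 & 0x3F) = 0xA9, while plain ASCII bytes are copied unchanged;
 * this is why 'extra' counts one additional output byte per high byte.
 */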
501 
502 static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
503  AVDictionary **dict)
504 {
505  int ret, method;
506  const uint8_t *data = s->gb.buffer;
507  const uint8_t *data_end = data + length;
508  const uint8_t *keyword = data;
509  const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
510  uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
511  unsigned text_len;
512  AVBPrint bp;
513 
514  if (!keyword_end)
515  return AVERROR_INVALIDDATA;
516  data = keyword_end + 1;
517 
518  if (compressed) {
519  if (data == data_end)
520  return AVERROR_INVALIDDATA;
521  method = *(data++);
522  if (method)
523  return AVERROR_INVALIDDATA;
524  if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
525  return ret;
526  text_len = bp.len;
527  ret = av_bprint_finalize(&bp, (char **)&text);
528  if (ret < 0)
529  return ret;
530  } else {
531  text = (uint8_t *)data;
532  text_len = data_end - text;
533  }
534 
535  kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
536  txt_utf8 = iso88591_to_utf8(text, text_len);
537  if (text != data)
538  av_free(text);
539  if (!(kw_utf8 && txt_utf8)) {
540  av_free(kw_utf8);
541  av_free(txt_utf8);
542  return AVERROR(ENOMEM);
543  }
544 
545  av_dict_set(dict, kw_utf8, txt_utf8,
546  AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
547  return 0;
548 }
549 
550 static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
551  uint32_t length)
552 {
553  if (length != 13)
554  return AVERROR_INVALIDDATA;
555 
556  if (s->pic_state & PNG_IDAT) {
557  av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
558  return AVERROR_INVALIDDATA;
559  }
560 
561  if (s->hdr_state & PNG_IHDR) {
562  av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
563  return AVERROR_INVALIDDATA;
564  }
565 
566  s->width = s->cur_w = bytestream2_get_be32(&s->gb);
567  s->height = s->cur_h = bytestream2_get_be32(&s->gb);
568  if (av_image_check_size(s->width, s->height, 0, avctx)) {
569  s->cur_w = s->cur_h = s->width = s->height = 0;
570  av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
571  return AVERROR_INVALIDDATA;
572  }
573  s->bit_depth = bytestream2_get_byte(&s->gb);
574  if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
575  s->bit_depth != 8 && s->bit_depth != 16) {
576  av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
577  goto error;
578  }
579  s->color_type = bytestream2_get_byte(&s->gb);
580  s->compression_type = bytestream2_get_byte(&s->gb);
581  if (s->compression_type) {
582  av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n", s->compression_type);
583  goto error;
584  }
585  s->filter_type = bytestream2_get_byte(&s->gb);
586  s->interlace_type = bytestream2_get_byte(&s->gb);
587  bytestream2_skip(&s->gb, 4); /* crc */
588  s->hdr_state |= PNG_IHDR;
589  if (avctx->debug & FF_DEBUG_PICT_INFO)
590  av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
591  "compression_type=%d filter_type=%d interlace_type=%d\n",
592  s->width, s->height, s->bit_depth, s->color_type,
593  s->compression_type, s->filter_type, s->interlace_type);
594 
595  return 0;
596 error:
597  s->cur_w = s->cur_h = s->width = s->height = 0;
598  s->bit_depth = 8;
599  return AVERROR_INVALIDDATA;
600 }
601 
602 static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
603 {
604  if (s->pic_state & PNG_IDAT) {
605  av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
606  return AVERROR_INVALIDDATA;
607  }
608  avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
609  avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
610  if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
611  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
612  bytestream2_skip(&s->gb, 1); /* unit specifier */
613  bytestream2_skip(&s->gb, 4); /* crc */
614 
615  return 0;
616 }
617 
618 static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
619  uint32_t length, AVFrame *p)
620 {
621  int ret;
622  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
623 
624  if (!(s->hdr_state & PNG_IHDR)) {
625  av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
626  return AVERROR_INVALIDDATA;
627  }
628  if (!(s->pic_state & PNG_IDAT)) {
629  /* init image info */
630  ret = ff_set_dimensions(avctx, s->width, s->height);
631  if (ret < 0)
632  return ret;
633 
634  s->channels = ff_png_get_nb_channels(s->color_type);
635  s->bits_per_pixel = s->bit_depth * s->channels;
636  s->bpp = (s->bits_per_pixel + 7) >> 3;
637  s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;
638 
639  if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
640  s->color_type == PNG_COLOR_TYPE_RGB) {
641  avctx->pix_fmt = AV_PIX_FMT_RGB24;
642  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
643  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
644  avctx->pix_fmt = AV_PIX_FMT_RGBA;
645  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
646  s->color_type == PNG_COLOR_TYPE_GRAY) {
647  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
648  } else if (s->bit_depth == 16 &&
649  s->color_type == PNG_COLOR_TYPE_GRAY) {
650  avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
651  } else if (s->bit_depth == 16 &&
652  s->color_type == PNG_COLOR_TYPE_RGB) {
653  avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
654  } else if (s->bit_depth == 16 &&
655  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
656  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
657  } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
658  s->color_type == PNG_COLOR_TYPE_PALETTE) {
659  avctx->pix_fmt = AV_PIX_FMT_PAL8;
660  } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
661  avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
662  } else if (s->bit_depth == 8 &&
663  s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
664  avctx->pix_fmt = AV_PIX_FMT_YA8;
665  } else if (s->bit_depth == 16 &&
666  s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
667  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
668  } else {
669  avpriv_request_sample(avctx,
670  "Bit depth %d color type %d",
671  s->bit_depth, s->color_type);
672  return AVERROR_PATCHWELCOME;
673  }
674 
675  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
676  switch (avctx->pix_fmt) {
677  case AV_PIX_FMT_RGB24:
678  avctx->pix_fmt = AV_PIX_FMT_RGBA;
679  break;
680 
681  case AV_PIX_FMT_RGB48BE:
682  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
683  break;
684 
685  case AV_PIX_FMT_GRAY8:
686  avctx->pix_fmt = AV_PIX_FMT_YA8;
687  break;
688 
689  case AV_PIX_FMT_GRAY16BE:
690  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
691  break;
692 
693  default:
694  avpriv_request_sample(avctx, "bit depth %d "
695  "and color type %d with TRNS",
696  s->bit_depth, s->color_type);
697  return AVERROR_INVALIDDATA;
698  }
699 
700  s->bpp += byte_depth;
701  }
702 
703  if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
704  return ret;
705  if (avctx->codec_id == AV_CODEC_ID_APNG && s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
706  ff_thread_release_buffer(avctx, &s->previous_picture);
707  if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
708  return ret;
709  }
710  p->pict_type = AV_PICTURE_TYPE_I;
711  p->key_frame = 1;
712  p->interlaced_frame = !!s->interlace_type;
713 
714  ff_thread_finish_setup(avctx);
715 
716  /* compute the compressed row size */
717  if (!s->interlace_type) {
718  s->crow_size = s->row_size + 1;
719  } else {
720  s->pass = 0;
721  s->pass_row_size = ff_png_pass_row_size(s->pass,
722  s->bits_per_pixel,
723  s->cur_w);
724  s->crow_size = s->pass_row_size + 1;
725  }
726  ff_dlog(avctx, "row_size=%d crow_size =%d\n",
727  s->row_size, s->crow_size);
728  s->image_buf = p->data[0];
729  s->image_linesize = p->linesize[0];
730  /* copy the palette if needed */
731  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
732  memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
733  /* empty row is used if differencing to the first row */
734  av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
735  if (!s->last_row)
736  return AVERROR_INVALIDDATA;
737  if (s->interlace_type ||
738  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
739  av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size);
740  if (!s->tmp_row)
741  return AVERROR_INVALIDDATA;
742  }
743  /* compressed row */
744  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
745  if (!s->buffer)
746  return AVERROR(ENOMEM);
747 
748  /* we want crow_buf+1 to be 16-byte aligned */
749  s->crow_buf = s->buffer + 15;
750  s->zstream.avail_out = s->crow_size;
751  s->zstream.next_out = s->crow_buf;
752  }
753 
754  s->pic_state |= PNG_IDAT;
755 
756  /* set image to non-transparent bpp while decompressing */
757  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
758  s->bpp -= byte_depth;
759 
760  ret = png_decode_idat(s, length);
761 
762  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
763  s->bpp += byte_depth;
764 
765  if (ret < 0)
766  return ret;
767 
768  bytestream2_skip(&s->gb, 4); /* crc */
769 
770  return 0;
771 }
772 
773 static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
774  uint32_t length)
775 {
776  int n, i, r, g, b;
777 
778  if ((length % 3) != 0 || length > 256 * 3)
779  return AVERROR_INVALIDDATA;
780  /* read the palette */
781  n = length / 3;
782  for (i = 0; i < n; i++) {
783  r = bytestream2_get_byte(&s->gb);
784  g = bytestream2_get_byte(&s->gb);
785  b = bytestream2_get_byte(&s->gb);
786  s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
787  }
788  for (; i < 256; i++)
789  s->palette[i] = (0xFFU << 24);
790  s->hdr_state |= PNG_PLTE;
791  bytestream2_skip(&s->gb, 4); /* crc */
792 
793  return 0;
794 }
795 
796 static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
797  uint32_t length)
798 {
799  int v, i;
800 
801  if (!(s->hdr_state & PNG_IHDR)) {
802  av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
803  return AVERROR_INVALIDDATA;
804  }
805 
806  if (s->pic_state & PNG_IDAT) {
807  av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
808  return AVERROR_INVALIDDATA;
809  }
810 
811  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
812  if (length > 256 || !(s->hdr_state & PNG_PLTE))
813  return AVERROR_INVALIDDATA;
814 
815  for (i = 0; i < length; i++) {
816  unsigned v = bytestream2_get_byte(&s->gb);
817  s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
818  }
819  } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
820  if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
821  (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
822  s->bit_depth == 1)
823  return AVERROR_INVALIDDATA;
824 
825  for (i = 0; i < length / 2; i++) {
826  /* only use the least significant bits */
827  v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
828 
829  if (s->bit_depth > 8)
830  AV_WB16(&s->transparent_color_be[2 * i], v);
831  else
832  s->transparent_color_be[i] = v;
833  }
834  } else {
835  return AVERROR_INVALIDDATA;
836  }
837 
838  bytestream2_skip(&s->gb, 4); /* crc */
839  s->has_trns = 1;
840 
841  return 0;
842 }
843 
844 static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
845 {
846  int ret, cnt = 0;
847  uint8_t *data, profile_name[82];
848  AVBPrint bp;
849  AVFrameSideData *sd;
850 
851  while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
852  if (cnt > 80) {
853  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
854  return AVERROR_INVALIDDATA;
855  }
856 
857  length = FFMAX(length - cnt, 0);
858 
859  if (bytestream2_get_byte(&s->gb) != 0) {
860  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
861  return AVERROR_INVALIDDATA;
862  }
863 
864  length = FFMAX(length - 1, 0);
865 
866  if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
867  return ret;
868 
869  ret = av_bprint_finalize(&bp, (char **)&data);
870  if (ret < 0)
871  return ret;
872 
873  sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len);
874  if (!sd) {
875  av_free(data);
876  return AVERROR(ENOMEM);
877  }
878 
879  av_dict_set(&sd->metadata, "name", profile_name, 0);
880  memcpy(sd->data, data, bp.len);
881  av_free(data);
882 
883  /* ICC compressed data and CRC */
884  bytestream2_skip(&s->gb, length + 4);
885 
886  return 0;
887 }
888 
889 static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
890 {
891  if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
892  int i, j, k;
893  uint8_t *pd = p->data[0];
894  for (j = 0; j < s->height; j++) {
895  i = s->width / 8;
896  for (k = 7; k >= 1; k--)
897  if ((s->width&7) >= k)
898  pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
899  for (i--; i >= 0; i--) {
900  pd[8*i + 7]= pd[i] & 1;
901  pd[8*i + 6]= (pd[i]>>1) & 1;
902  pd[8*i + 5]= (pd[i]>>2) & 1;
903  pd[8*i + 4]= (pd[i]>>3) & 1;
904  pd[8*i + 3]= (pd[i]>>4) & 1;
905  pd[8*i + 2]= (pd[i]>>5) & 1;
906  pd[8*i + 1]= (pd[i]>>6) & 1;
907  pd[8*i + 0]= pd[i]>>7;
908  }
909  pd += s->image_linesize;
910  }
911  } else if (s->bits_per_pixel == 2) {
912  int i, j;
913  uint8_t *pd = p->data[0];
914  for (j = 0; j < s->height; j++) {
915  i = s->width / 4;
916  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
917  if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
918  if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
919  if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
920  for (i--; i >= 0; i--) {
921  pd[4*i + 3]= pd[i] & 3;
922  pd[4*i + 2]= (pd[i]>>2) & 3;
923  pd[4*i + 1]= (pd[i]>>4) & 3;
924  pd[4*i + 0]= pd[i]>>6;
925  }
926  } else {
927  if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
928  if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
929  if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
930  for (i--; i >= 0; i--) {
931  pd[4*i + 3]= ( pd[i] & 3)*0x55;
932  pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
933  pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
934  pd[4*i + 0]= ( pd[i]>>6 )*0x55;
935  }
936  }
937  pd += s->image_linesize;
938  }
939  } else if (s->bits_per_pixel == 4) {
940  int i, j;
941  uint8_t *pd = p->data[0];
942  for (j = 0; j < s->height; j++) {
943  i = s->width/2;
944  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
945  if (s->width&1) pd[2*i+0]= pd[i]>>4;
946  for (i--; i >= 0; i--) {
947  pd[2*i + 1] = pd[i] & 15;
948  pd[2*i + 0] = pd[i] >> 4;
949  }
950  } else {
951  if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
952  for (i--; i >= 0; i--) {
953  pd[2*i + 1] = (pd[i] & 15) * 0x11;
954  pd[2*i + 0] = (pd[i] >> 4) * 0x11;
955  }
956  }
957  pd += s->image_linesize;
958  }
959  }
960 }
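/*
 * Worked example (illustrative): for 2-bit grayscale the in-place expansion
 * above multiplies each 2-bit sample by 0x55 so the value is replicated across
 * the full 8-bit range (3 * 0x55 = 0xFF, 1 * 0x55 = 0x55); 4-bit grayscale
 * uses 0x11 the same way (0xA * 0x11 = 0xAA). Palette indices are unpacked
 * without scaling because they index s->palette directly.
 */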
961 
962 static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
963  uint32_t length)
964 {
965  uint32_t sequence_number;
966  int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;
967 
968  if (length != 26)
969  return AVERROR_INVALIDDATA;
970 
971  if (!(s->hdr_state & PNG_IHDR)) {
972  av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
973  return AVERROR_INVALIDDATA;
974  }
975 
976  if (s->pic_state & PNG_IDAT) {
977  av_log(avctx, AV_LOG_ERROR, "fctl after IDAT\n");
978  return AVERROR_INVALIDDATA;
979  }
980 
981  s->last_w = s->cur_w;
982  s->last_h = s->cur_h;
983  s->last_x_offset = s->x_offset;
984  s->last_y_offset = s->y_offset;
985  s->last_dispose_op = s->dispose_op;
986 
987  sequence_number = bytestream2_get_be32(&s->gb);
988  cur_w = bytestream2_get_be32(&s->gb);
989  cur_h = bytestream2_get_be32(&s->gb);
990  x_offset = bytestream2_get_be32(&s->gb);
991  y_offset = bytestream2_get_be32(&s->gb);
992  bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
993  dispose_op = bytestream2_get_byte(&s->gb);
994  blend_op = bytestream2_get_byte(&s->gb);
995  bytestream2_skip(&s->gb, 4); /* crc */
996 
997  if (sequence_number == 0 &&
998  (cur_w != s->width ||
999  cur_h != s->height ||
1000  x_offset != 0 ||
1001  y_offset != 0) ||
1002  cur_w <= 0 || cur_h <= 0 ||
1003  x_offset < 0 || y_offset < 0 ||
1004  cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
1005  return AVERROR_INVALIDDATA;
1006 
1007  if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
1008  av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
1009  return AVERROR_INVALIDDATA;
1010  }
1011 
1012  if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
1013  dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
1014  // No previous frame to revert to for the first frame
1015  // Spec says to just treat it as an APNG_DISPOSE_OP_BACKGROUND
1016  dispose_op = APNG_DISPOSE_OP_BACKGROUND;
1017  }
1018 
1019  if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
1020  avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
1021  avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1022  avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
1023  avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
1024  avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
1025  avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
1026  )) {
1027  // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
1028  blend_op = APNG_BLEND_OP_SOURCE;
1029  }
1030 
1031  s->cur_w = cur_w;
1032  s->cur_h = cur_h;
1033  s->x_offset = x_offset;
1034  s->y_offset = y_offset;
1035  s->dispose_op = dispose_op;
1036  s->blend_op = blend_op;
1037 
1038  return 0;
1039 }
1040 
1041 static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
1042 {
1043  int i, j;
1044  uint8_t *pd = p->data[0];
1045  uint8_t *pd_last = s->last_picture.f->data[0];
1046  int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
1047 
1048  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1049  for (j = 0; j < s->height; j++) {
1050  for (i = 0; i < ls; i++)
1051  pd[i] += pd_last[i];
1052  pd += s->image_linesize;
1053  pd_last += s->image_linesize;
1054  }
1055 }
1056 
1057 // divide by 255 and round to nearest
1058 // apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
1059 #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
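/*
 * Worked check (illustrative): FAST_DIV255(128) = ((128 + 128) * 257) >> 16
 * = 65792 >> 16 = 1 and FAST_DIV255(127) = (255 * 257) >> 16 = 0, matching
 * round(x / 255); at the top of the range FAST_DIV255(255 * 255)
 * = (65153 * 257) >> 16 = 255, so the shortcut stays correct for the
 * 0..255*255 products used by the blending code below.
 */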
1060 
1061 static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
1062  AVFrame *p)
1063 {
1064  size_t x, y;
1065  uint8_t *buffer;
1066 
1067  if (s->blend_op == APNG_BLEND_OP_OVER &&
1068  avctx->pix_fmt != AV_PIX_FMT_RGBA &&
1069  avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
1070  avctx->pix_fmt != AV_PIX_FMT_PAL8) {
1071  avpriv_request_sample(avctx, "Blending with pixel format %s",
1072  av_get_pix_fmt_name(avctx->pix_fmt));
1073  return AVERROR_PATCHWELCOME;
1074  }
1075 
1076  buffer = av_malloc_array(s->image_linesize, s->height);
1077  if (!buffer)
1078  return AVERROR(ENOMEM);
1079 
1080 
1081  // Do the disposal operation specified by the last frame on the frame
1082  if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
1083  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1084  memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);
1085 
1086  if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND)
1087  for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
1088  memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
1089 
1090  memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
1091  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1092  } else {
1093  ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
1094  memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
1095  }
1096 
1097  // Perform blending
1098  if (s->blend_op == APNG_BLEND_OP_SOURCE) {
1099  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1100  size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
1101  memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
1102  }
1103  } else { // APNG_BLEND_OP_OVER
1104  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1105  uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
1106  uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
1107  for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
1108  size_t b;
1109  uint8_t foreground_alpha, background_alpha, output_alpha;
1110  uint8_t output[10];
1111 
1112  // Since we might be blending alpha onto alpha, we use the following equations:
1113  // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
1114  // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
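// Worked example (illustrative): foreground_alpha = 128 over an opaque
// background (background_alpha = 255) gives output_alpha = 128 +
// FAST_DIV255(127 * 255) = 255, and each channel then takes the
// background_alpha == 255 shortcut below: output = FAST_DIV255(128 * fg + 127 * bg).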
1115 
1116  switch (avctx->pix_fmt) {
1117  case AV_PIX_FMT_RGBA:
1118  foreground_alpha = foreground[3];
1119  background_alpha = background[3];
1120  break;
1121 
1122  case AV_PIX_FMT_GRAY8A:
1123  foreground_alpha = foreground[1];
1124  background_alpha = background[1];
1125  break;
1126 
1127  case AV_PIX_FMT_PAL8:
1128  foreground_alpha = s->palette[foreground[0]] >> 24;
1129  background_alpha = s->palette[background[0]] >> 24;
1130  break;
1131  }
1132 
1133  if (foreground_alpha == 0)
1134  continue;
1135 
1136  if (foreground_alpha == 255) {
1137  memcpy(background, foreground, s->bpp);
1138  continue;
1139  }
1140 
1141  if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1142  // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
1143  avpriv_request_sample(avctx, "Alpha blending palette samples");
1144  background[0] = foreground[0];
1145  continue;
1146  }
1147 
1148  output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);
1149 
1150  av_assert0(s->bpp <= 10);
1151 
1152  for (b = 0; b < s->bpp - 1; ++b) {
1153  if (output_alpha == 0) {
1154  output[b] = 0;
1155  } else if (background_alpha == 255) {
1156  output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
1157  } else {
1158  output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
1159  }
1160  }
1161  output[b] = output_alpha;
1162  memcpy(background, output, s->bpp);
1163  }
1164  }
1165  }
1166 
1167  // Copy blended buffer into the frame and free
1168  memcpy(p->data[0], buffer, s->image_linesize * s->height);
1169  av_free(buffer);
1170 
1171  return 0;
1172 }
1173 
1174 static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
1175  AVFrame *p, AVPacket *avpkt)
1176 {
1177  AVDictionary **metadatap = NULL;
1178  uint32_t tag, length;
1179  int decode_next_dat = 0;
1180  int i, ret;
1181 
1182  for (;;) {
1183  length = bytestream2_get_bytes_left(&s->gb);
1184  if (length <= 0) {
1185 
1186  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1187  avctx->skip_frame == AVDISCARD_ALL) {
1188  return 0;
1189  }
1190 
1191  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
1192  if (!(s->pic_state & PNG_IDAT))
1193  return 0;
1194  else
1195  goto exit_loop;
1196  }
1197  av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
1198  if ( s->pic_state & PNG_ALLIMAGE
1199  || avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL)
1200  goto exit_loop;
1201  ret = AVERROR_INVALIDDATA;
1202  goto fail;
1203  }
1204 
1205  length = bytestream2_get_be32(&s->gb);
1206  if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
1207  av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
1208  ret = AVERROR_INVALIDDATA;
1209  goto fail;
1210  }
1211  tag = bytestream2_get_le32(&s->gb);
1212  if (avctx->debug & FF_DEBUG_STARTCODE)
1213  av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
1214  av_fourcc2str(tag), length);
1215 
1216  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1217  avctx->skip_frame == AVDISCARD_ALL) {
1218  switch(tag) {
1219  case MKTAG('I', 'H', 'D', 'R'):
1220  case MKTAG('p', 'H', 'Y', 's'):
1221  case MKTAG('t', 'E', 'X', 't'):
1222  case MKTAG('I', 'D', 'A', 'T'):
1223  case MKTAG('t', 'R', 'N', 'S'):
1224  break;
1225  default:
1226  goto skip_tag;
1227  }
1228  }
1229 
1230  metadatap = &p->metadata;
1231  switch (tag) {
1232  case MKTAG('I', 'H', 'D', 'R'):
1233  if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
1234  goto fail;
1235  break;
1236  case MKTAG('p', 'H', 'Y', 's'):
1237  if ((ret = decode_phys_chunk(avctx, s)) < 0)
1238  goto fail;
1239  break;
1240  case MKTAG('f', 'c', 'T', 'L'):
1241  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1242  goto skip_tag;
1243  if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
1244  goto fail;
1245  decode_next_dat = 1;
1246  break;
1247  case MKTAG('f', 'd', 'A', 'T'):
1248  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1249  goto skip_tag;
1250  if (!decode_next_dat || length < 4) {
1251  ret = AVERROR_INVALIDDATA;
1252  goto fail;
1253  }
1254  bytestream2_get_be32(&s->gb);
1255  length -= 4;
1256  /* fallthrough */
1257  case MKTAG('I', 'D', 'A', 'T'):
1258  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
1259  goto skip_tag;
1260  if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
1261  goto fail;
1262  break;
1263  case MKTAG('P', 'L', 'T', 'E'):
1264  if (decode_plte_chunk(avctx, s, length) < 0)
1265  goto skip_tag;
1266  break;
1267  case MKTAG('t', 'R', 'N', 'S'):
1268  if (decode_trns_chunk(avctx, s, length) < 0)
1269  goto skip_tag;
1270  break;
1271  case MKTAG('t', 'E', 'X', 't'):
1272  if (decode_text_chunk(s, length, 0, metadatap) < 0)
1273  av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
1274  bytestream2_skip(&s->gb, length + 4);
1275  break;
1276  case MKTAG('z', 'T', 'X', 't'):
1277  if (decode_text_chunk(s, length, 1, metadatap) < 0)
1278  av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
1279  bytestream2_skip(&s->gb, length + 4);
1280  break;
1281  case MKTAG('s', 'T', 'E', 'R'): {
1282  int mode = bytestream2_get_byte(&s->gb);
1283  AVStereo3D *stereo3d = av_stereo3d_create_side_data(p);
1284  if (!stereo3d)
1285  goto fail;
1286 
1287  if (mode == 0 || mode == 1) {
1288  stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1289  stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
1290  } else {
1291  av_log(avctx, AV_LOG_WARNING,
1292  "Unknown value in sTER chunk (%d)\n", mode);
1293  }
1294  bytestream2_skip(&s->gb, 4); /* crc */
1295  break;
1296  }
1297  case MKTAG('i', 'C', 'C', 'P'): {
1298  if ((ret = decode_iccp_chunk(s, length, p)) < 0)
1299  goto fail;
1300  break;
1301  }
1302  case MKTAG('c', 'H', 'R', 'M'): {
1303  AVMasteringDisplayMetadata *mdm = av_mastering_display_metadata_create_side_data(p);
1304  if (!mdm) {
1305  ret = AVERROR(ENOMEM);
1306  goto fail;
1307  }
1308 
1309  mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1310  mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1311 
1312  /* RGB Primaries */
1313  for (i = 0; i < 3; i++) {
1314  mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1315  mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1316  }
1317 
1318  mdm->has_primaries = 1;
1319  bytestream2_skip(&s->gb, 4); /* crc */
1320  break;
1321  }
1322  case MKTAG('g', 'A', 'M', 'A'): {
1323  AVBPrint bp;
1324  char *gamma_str;
1325  int num = bytestream2_get_be32(&s->gb);
1326 
1327  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
1328  av_bprintf(&bp, "%i/%i", num, 100000);
1329  ret = av_bprint_finalize(&bp, &gamma_str);
1330  if (ret < 0)
1331  return ret;
1332 
1333  av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);
1334 
1335  bytestream2_skip(&s->gb, 4); /* crc */
1336  break;
1337  }
1338  case MKTAG('I', 'E', 'N', 'D'):
1339  if (!(s->pic_state & PNG_ALLIMAGE))
1340  av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
1341  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1342  ret = AVERROR_INVALIDDATA;
1343  goto fail;
1344  }
1345  bytestream2_skip(&s->gb, 4); /* crc */
1346  goto exit_loop;
1347  default:
1348  /* skip tag */
1349 skip_tag:
1350  bytestream2_skip(&s->gb, length + 4);
1351  break;
1352  }
1353  }
1354 exit_loop:
1355 
1356  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1357  avctx->skip_frame == AVDISCARD_ALL) {
1358  return 0;
1359  }
1360 
1361  if (s->bits_per_pixel <= 4)
1362  handle_small_bpp(s, p);
1363 
1364  /* apply transparency if needed */
1365  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
1366  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
1367  size_t raw_bpp = s->bpp - byte_depth;
1368  unsigned x, y;
1369 
1370  av_assert0(s->bit_depth > 1);
1371 
1372  for (y = 0; y < s->height; ++y) {
1373  uint8_t *row = &s->image_buf[s->image_linesize * y];
1374 
1375  /* since we're updating in-place, we have to go from right to left */
1376  for (x = s->width; x > 0; --x) {
1377  uint8_t *pixel = &row[s->bpp * (x - 1)];
1378  memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
1379 
1380  if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
1381  memset(&pixel[raw_bpp], 0, byte_depth);
1382  } else {
1383  memset(&pixel[raw_bpp], 0xff, byte_depth);
1384  }
1385  }
1386  }
1387  }
1388 
1389  /* handle P-frames only if a predecessor frame is available */
1390  if (s->last_picture.f->data[0]) {
1391  if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
1392  && s->last_picture.f->width == p->width
1393  && s->last_picture.f->height== p->height
1394  && s->last_picture.f->format== p->format
1395  ) {
1396  if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
1397  handle_p_frame_png(s, p);
1398  else if (CONFIG_APNG_DECODER &&
1399  s->previous_picture.f->width == p->width &&
1400  s->previous_picture.f->height== p->height &&
1401  s->previous_picture.f->format== p->format &&
1402  avctx->codec_id == AV_CODEC_ID_APNG &&
1403  (ret = handle_p_frame_apng(avctx, s, p)) < 0)
1404  goto fail;
1405  }
1406  }
1407  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1408  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1409 
1410  return 0;
1411 
1412 fail:
1413  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1414  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1415  return ret;
1416 }
1417 
1418 #if CONFIG_PNG_DECODER
1419 static int decode_frame_png(AVCodecContext *avctx,
1420  void *data, int *got_frame,
1421  AVPacket *avpkt)
1422 {
1423  PNGDecContext *const s = avctx->priv_data;
1424  const uint8_t *buf = avpkt->data;
1425  int buf_size = avpkt->size;
1426  AVFrame *p;
1427  int64_t sig;
1428  int ret;
1429 
1430  ff_thread_release_buffer(avctx, &s->last_picture);
1431  FFSWAP(ThreadFrame, s->picture, s->last_picture);
1432  p = s->picture.f;
1433 
1434  bytestream2_init(&s->gb, buf, buf_size);
1435 
1436  /* check signature */
1437  sig = bytestream2_get_be64(&s->gb);
1438  if (sig != PNGSIG &&
1439  sig != MNGSIG) {
1440  av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
1441  return AVERROR_INVALIDDATA;
1442  }
1443 
1444  s->y = s->has_trns = 0;
1445  s->hdr_state = 0;
1446  s->pic_state = 0;
1447 
1448  /* init the zlib */
1449  s->zstream.zalloc = ff_png_zalloc;
1450  s->zstream.zfree = ff_png_zfree;
1451  s->zstream.opaque = NULL;
1452  ret = inflateInit(&s->zstream);
1453  if (ret != Z_OK) {
1454  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1455  return AVERROR_EXTERNAL;
1456  }
1457 
1458  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1459  goto the_end;
1460 
1461  if (avctx->skip_frame == AVDISCARD_ALL) {
1462  *got_frame = 0;
1463  ret = bytestream2_tell(&s->gb);
1464  goto the_end;
1465  }
1466 
1467  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1468  goto the_end;
1469 
1470  *got_frame = 1;
1471 
1472  ret = bytestream2_tell(&s->gb);
1473 the_end:
1474  inflateEnd(&s->zstream);
1475  s->crow_buf = NULL;
1476  return ret;
1477 }
1478 #endif
1479 
1480 #if CONFIG_APNG_DECODER
1481 static int decode_frame_apng(AVCodecContext *avctx,
1482  void *data, int *got_frame,
1483  AVPacket *avpkt)
1484 {
1485  PNGDecContext *const s = avctx->priv_data;
1486  int ret;
1487  AVFrame *p;
1488 
1489  ff_thread_release_buffer(avctx, &s->last_picture);
1490  FFSWAP(ThreadFrame, s->picture, s->last_picture);
1491  p = s->picture.f;
1492 
1493  if (!(s->hdr_state & PNG_IHDR)) {
1494  if (!avctx->extradata_size)
1495  return AVERROR_INVALIDDATA;
1496 
1497  /* only init fields, there is no zlib use in extradata */
1498  s->zstream.zalloc = ff_png_zalloc;
1499  s->zstream.zfree = ff_png_zfree;
1500 
1501  bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
1502  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1503  goto end;
1504  }
1505 
1506  /* reset state for a new frame */
1507  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1508  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1509  ret = AVERROR_EXTERNAL;
1510  goto end;
1511  }
1512  s->y = 0;
1513  s->pic_state = 0;
1514  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1515  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1516  goto end;
1517 
1518  if (!(s->pic_state & PNG_ALLIMAGE))
1519  av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
1520  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1521  ret = AVERROR_INVALIDDATA;
1522  goto end;
1523  }
1524  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1525  goto end;
1526 
1527  *got_frame = 1;
1528  ret = bytestream2_tell(&s->gb);
1529 
1530 end:
1531  inflateEnd(&s->zstream);
1532  return ret;
1533 }
1534 #endif
1535 
1536 #if CONFIG_LSCR_DECODER
1537 static int decode_frame_lscr(AVCodecContext *avctx,
1538  void *data, int *got_frame,
1539  AVPacket *avpkt)
1540 {
1541  PNGDecContext *const s = avctx->priv_data;
1542  GetByteContext *gb = &s->gb;
1543  AVFrame *frame = data;
1544  int ret, nb_blocks, offset = 0;
1545 
1546  if (avpkt->size < 2)
1547  return AVERROR_INVALIDDATA;
1548 
1549  bytestream2_init(gb, avpkt->data, avpkt->size);
1550 
1551  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
1552  return ret;
1553 
1554  nb_blocks = bytestream2_get_le16(gb);
1555  if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * (12 + 8))
1556  return AVERROR_INVALIDDATA;
1557 
1558  if (s->last_picture.f->data[0]) {
1559  ret = av_frame_copy(frame, s->last_picture.f);
1560  if (ret < 0)
1561  return ret;
1562  }
1563 
1564  for (int b = 0; b < nb_blocks; b++) {
1565  int x, y, x2, y2, w, h, left;
1566  uint32_t csize, size;
1567 
1568  s->zstream.zalloc = ff_png_zalloc;
1569  s->zstream.zfree = ff_png_zfree;
1570  s->zstream.opaque = NULL;
1571 
1572  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1573  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1574  ret = AVERROR_EXTERNAL;
1575  goto end;
1576  }
1577 
1578  bytestream2_seek(gb, 2 + b * 12, SEEK_SET);
1579 
1580  x = bytestream2_get_le16(gb);
1581  y = bytestream2_get_le16(gb);
1582  x2 = bytestream2_get_le16(gb);
1583  y2 = bytestream2_get_le16(gb);
1584  s->width = s->cur_w = w = x2-x;
1585  s->height = s->cur_h = h = y2-y;
1586 
1587  if (w <= 0 || x < 0 || x >= avctx->width || w + x > avctx->width ||
1588  h <= 0 || y < 0 || y >= avctx->height || h + y > avctx->height) {
1589  ret = AVERROR_INVALIDDATA;
1590  goto end;
1591  }
1592 
1593  size = bytestream2_get_le32(gb);
1594 
1595  frame->key_frame = (nb_blocks == 1) &&
1596  (w == avctx->width) &&
1597  (h == avctx->height) &&
1598  (x == 0) && (y == 0);
1599 
1600  bytestream2_seek(gb, 2 + nb_blocks * 12 + offset, SEEK_SET);
1601  csize = bytestream2_get_be32(gb);
1602  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1603  ret = AVERROR_INVALIDDATA;
1604  goto end;
1605  }
1606 
1607  offset += size;
1608  left = size;
1609 
1610  s->y = 0;
1611  s->row_size = w * 3;
1612 
1613  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
1614  if (!s->buffer) {
1615  ret = AVERROR(ENOMEM);
1616  goto end;
1617  }
1618 
1619  av_fast_padded_malloc(&s->last_row, &s->last_row_size, s->row_size);
1620  if (!s->last_row) {
1621  ret = AVERROR(ENOMEM);
1622  goto end;
1623  }
1624 
1625  s->crow_size = w * 3 + 1;
1626  s->crow_buf = s->buffer + 15;
1627  s->zstream.avail_out = s->crow_size;
1628  s->zstream.next_out = s->crow_buf;
1629  s->image_buf = frame->data[0] + (avctx->height - y - 1) * frame->linesize[0] + x * 3;
1630  s->image_linesize =-frame->linesize[0];
1631  s->bpp = 3;
1632  s->pic_state = 0;
1633 
1634  while (left > 16) {
1635  ret = png_decode_idat(s, csize);
1636  if (ret < 0)
1637  goto end;
1638  left -= csize + 16;
1639  if (left > 16) {
1640  bytestream2_skip(gb, 4);
1641  csize = bytestream2_get_be32(gb);
1642  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1643  ret = AVERROR_INVALIDDATA;
1644  goto end;
1645  }
1646  }
1647  }
1648 
1649  inflateEnd(&s->zstream);
1650  }
1651 
1652  frame->pict_type = frame->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1653 
1654  av_frame_unref(s->last_picture.f);
1655  if ((ret = av_frame_ref(s->last_picture.f, frame)) < 0)
1656  return ret;
1657 
1658  *got_frame = 1;
1659 end:
1660  inflateEnd(&s->zstream);
1661 
1662  if (ret < 0)
1663  return ret;
1664  return avpkt->size;
1665 }
1666 
1667 static void decode_flush(AVCodecContext *avctx)
1668 {
1669  PNGDecContext *s = avctx->priv_data;
1670 
1671  av_frame_unref(s->last_picture.f);
1672 }
1673 
1674 #endif
1675 
1676 #if HAVE_THREADS
1677 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1678 {
1679  PNGDecContext *psrc = src->priv_data;
1680  PNGDecContext *pdst = dst->priv_data;
1681  int ret;
1682 
1683  if (dst == src)
1684  return 0;
1685 
1686  ff_thread_release_buffer(dst, &pdst->picture);
1687  if (psrc->picture.f->data[0] &&
1688  (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
1689  return ret;
1690  if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
1691  pdst->width = psrc->width;
1692  pdst->height = psrc->height;
1693  pdst->bit_depth = psrc->bit_depth;
1694  pdst->color_type = psrc->color_type;
1695  pdst->compression_type = psrc->compression_type;
1696  pdst->interlace_type = psrc->interlace_type;
1697  pdst->filter_type = psrc->filter_type;
1698  pdst->cur_w = psrc->cur_w;
1699  pdst->cur_h = psrc->cur_h;
1700  pdst->x_offset = psrc->x_offset;
1701  pdst->y_offset = psrc->y_offset;
1702  pdst->has_trns = psrc->has_trns;
1703  memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
1704 
1705  pdst->dispose_op = psrc->dispose_op;
1706 
1707  memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
1708 
1709  pdst->hdr_state |= psrc->hdr_state;
1710 
1711  ff_thread_release_buffer(dst, &pdst->last_picture);
1712  if (psrc->last_picture.f->data[0] &&
1713  (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
1714  return ret;
1715 
1716  ff_thread_release_buffer(dst, &pdst->previous_picture);
1717  if (psrc->previous_picture.f->data[0] &&
1718  (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
1719  return ret;
1720  }
1721 
1722  return 0;
1723 }
1724 #endif
1725 
1726 static av_cold int png_dec_init(AVCodecContext *avctx)
1727 {
1728  PNGDecContext *s = avctx->priv_data;
1729 
1730  avctx->color_range = AVCOL_RANGE_JPEG;
1731 
1732  if (avctx->codec_id == AV_CODEC_ID_LSCR)
1733  avctx->pix_fmt = AV_PIX_FMT_BGR24;
1734 
1735  s->avctx = avctx;
1736  s->previous_picture.f = av_frame_alloc();
1737  s->last_picture.f = av_frame_alloc();
1738  s->picture.f = av_frame_alloc();
1739  if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
1740  av_frame_free(&s->previous_picture.f);
1741  av_frame_free(&s->last_picture.f);
1742  av_frame_free(&s->picture.f);
1743  return AVERROR(ENOMEM);
1744  }
1745 
1746  if (!avctx->internal->is_copy) {
1747  avctx->internal->allocate_progress = 1;
1748  ff_pngdsp_init(&s->dsp);
1749  }
1750 
1751  return 0;
1752 }
1753 
1754 static av_cold int png_dec_end(AVCodecContext *avctx)
1755 {
1756  PNGDecContext *s = avctx->priv_data;
1757 
1758  ff_thread_release_buffer(avctx, &s->previous_picture);
1759  av_frame_free(&s->previous_picture.f);
1760  ff_thread_release_buffer(avctx, &s->last_picture);
1761  av_frame_free(&s->last_picture.f);
1762  ff_thread_release_buffer(avctx, &s->picture);
1763  av_frame_free(&s->picture.f);
1764  av_freep(&s->buffer);
1765  s->buffer_size = 0;
1766  av_freep(&s->last_row);
1767  s->last_row_size = 0;
1768  av_freep(&s->tmp_row);
1769  s->tmp_row_size = 0;
1770 
1771  return 0;
1772 }
1773 
1774 #if CONFIG_APNG_DECODER
1775 AVCodec ff_apng_decoder = {
1776  .name = "apng",
1777  .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
1778  .type = AVMEDIA_TYPE_VIDEO,
1779  .id = AV_CODEC_ID_APNG,
1780  .priv_data_size = sizeof(PNGDecContext),
1781  .init = png_dec_init,
1782  .close = png_dec_end,
1783  .decode = decode_frame_apng,
1784  .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
1785  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1786  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1787  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1788 };
1789 #endif
1790 
1791 #if CONFIG_PNG_DECODER
1792 AVCodec ff_png_decoder = {
1793  .name = "png",
1794  .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
1795  .type = AVMEDIA_TYPE_VIDEO,
1796  .id = AV_CODEC_ID_PNG,
1797  .priv_data_size = sizeof(PNGDecContext),
1798  .init = png_dec_init,
1799  .close = png_dec_end,
1800  .decode = decode_frame_png,
1801  .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
1802  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1803  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1804  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1805 };
1806 #endif
1807 
1808 #if CONFIG_LSCR_DECODER
1809 AVCodec ff_lscr_decoder = {
1810  .name = "lscr",
1811  .long_name = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
1812  .type = AVMEDIA_TYPE_VIDEO,
1813  .id = AV_CODEC_ID_LSCR,
1814  .priv_data_size = sizeof(PNGDecContext),
1815  .init = png_dec_init,
1816  .close = png_dec_end,
1817  .decode = decode_frame_lscr,
1818  .flush = decode_flush,
1819  .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1820  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1821 };
1822 #endif
PNG_PLTE
@ PNG_PLTE
Definition: pngdec.c:42
PNGDSPContext
Definition: pngdsp.h:27
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
AVCodec
AVCodec.
Definition: avcodec.h:3481
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
PNGDecContext::last_h
int last_h
Definition: pngdec.c:63
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
ff_add_png_paeth_prediction
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdec.c:185
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
r
const char * r
Definition: vf_curves.c:114
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
GetByteContext
Definition: bytestream.h:33
PNG_ALLIMAGE
@ PNG_ALLIMAGE
Definition: pngdec.c:47
n
int n
Definition: avisynth_c.h:760
PNGDecContext::last_row_size
unsigned int last_row_size
Definition: pngdec.c:84
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:366
PNGDecContext::bit_depth
int bit_depth
Definition: pngdec.c:68
ff_png_get_nb_channels
int ff_png_get_nb_channels(int color_type)
Definition: png.c:49
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
PNGDSPContext::add_paeth_prediction
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdsp.h:33
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
PNGDecContext::crow_size
int crow_size
Definition: pngdec.c:90
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
AVFrame::width
int width
Definition: frame.h:353
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
decode_zbuf
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end)
Definition: pngdec.c:434
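decode_zbuf() inflates the compressed payload of zTXt/iCCP-style chunks into an AVBPrint. The routine below is a generic zlib decompression sketch into a caller-provided fixed buffer; the name and the fixed-capacity output are assumptions for illustration, not the AVBPrint-based approach used in the decoder.

#include <zlib.h>
#include <string.h>
#include <stddef.h>

/* Inflate src[0..src_len) into dst (capacity dst_cap).
 * Returns the number of bytes produced, or -1 on error. */
static long zlib_inflate_buf(unsigned char *dst, size_t dst_cap,
                             const unsigned char *src, size_t src_len)
{
    z_stream zs;
    int ret;

    memset(&zs, 0, sizeof(zs));         /* zalloc/zfree = 0 -> zlib defaults */
    if (inflateInit(&zs) != Z_OK)
        return -1;

    zs.next_in   = (unsigned char *)src;
    zs.avail_in  = (uInt)src_len;
    zs.next_out  = dst;
    zs.avail_out = (uInt)dst_cap;

    ret = inflate(&zs, Z_FINISH);       /* whole payload is available at once */
    inflateEnd(&zs);

    if (ret != Z_STREAM_END)
        return -1;
    return (long)zs.total_out;
}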
b
#define b
Definition: input.c:41
PNGDecContext::last_y_offset
int last_y_offset
Definition: pngdec.c:65
data
const char data[16]
Definition: mxf.c:91
decode_ihdr_chunk
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:550
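An IHDR payload is 13 bytes: 32-bit big-endian width and height followed by five single-byte fields (bit depth, colour type, compression, filter, interlace). A minimal parser for just that layout, independent of the bytestream2 helpers the decoder uses and with illustrative names:

#include <stdint.h>
#include <stddef.h>

struct png_ihdr {
    uint32_t width, height;
    uint8_t  bit_depth, color_type, compression, filter, interlace;
};

static uint32_t rb32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

/* Parse the 13-byte IHDR payload (PNG spec, section 11.2.2).
 * Returns 0 on success, -1 if the chunk is too short. */
static int parse_ihdr(struct png_ihdr *h, const uint8_t *buf, size_t len)
{
    if (len < 13)
        return -1;
    h->width       = rb32(buf);
    h->height      = rb32(buf + 4);
    h->bit_depth   = buf[8];
    h->color_type  = buf[9];
    h->compression = buf[10];
    h->filter      = buf[11];
    h->interlace   = buf[12];
    return 0;
}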
PNGDecContext::row_size
int row_size
Definition: pngdec.c:91
iso88591_to_utf8
static uint8_t * iso88591_to_utf8(const uint8_t *in, size_t size_in)
Definition: pngdec.c:478
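ISO 8859-1 to UTF-8 conversion is mechanical: bytes below 0x80 pass through, everything else becomes a two-byte sequence. A hedged sketch of that transform; the allocation strategy and the latin1_to_utf8 name are illustrative, not the in-tree implementation.

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

/* Convert an ISO 8859-1 string to UTF-8. Each byte >= 0x80 expands to
 * two bytes: 0xC0 | (c >> 6) followed by 0x80 | (c & 0x3F). */
static char *latin1_to_utf8(const uint8_t *in, size_t len)
{
    char  *out = malloc(2 * len + 1);   /* worst case: every byte doubles */
    size_t o   = 0;

    if (!out)
        return NULL;
    for (size_t i = 0; i < len; i++) {
        uint8_t c = in[i];
        if (c < 0x80) {
            out[o++] = c;
        } else {
            out[o++] = 0xC0 | (c >> 6);
            out[o++] = 0x80 | (c & 0x3F);
        }
    }
    out[o] = '\0';
    return out;
}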
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
PNGDecContext::width
int width
Definition: pngdec.c:61
AVDictionary
Definition: dict.c:30
PNGDecContext::previous_picture
ThreadFrame previous_picture
Definition: pngdec.c:55
AV_CODEC_ID_APNG
@ AV_CODEC_ID_APNG
Definition: avcodec.h:428
PNGDecContext::tmp_row_size
unsigned int tmp_row_size
Definition: pngdec.c:86
PNGDecContext::color_type
int color_type
Definition: pngdec.c:69
PNGDecContext::cur_h
int cur_h
Definition: pngdec.c:62
ff_png_zfree
void ff_png_zfree(void *opaque, void *ptr)
Definition: png.c:44
PNGDecContext::bits_per_pixel
int bits_per_pixel
Definition: pngdec.c:74
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1509
thread.h
ff_thread_await_progress
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread_frame.c
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
bytestream2_get_bytes_left
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
NB_PASSES
#define NB_PASSES
Definition: png.h:45
PNGDecContext::blend_op
uint8_t blend_op
Definition: pngdec.c:66
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: agm.c:1261
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
AVCodecInternal::is_copy
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
Definition: internal.h:136
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
PNGDecContext::zstream
z_stream zstream
Definition: pngdec.c:94
FAST_DIV255
#define FAST_DIV255(x)
Definition: pngdec.c:1059
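FAST_DIV255 is the usual integer approximation of x/255 used when blending 8-bit samples. Below is a stand-alone version of that trick together with a typical OVER-style blend built on it; the exact macro body in pngdec.c may differ, and blend_over is an illustrative helper rather than the decoder's blending code.

#include <stdint.h>

/* Approximate x / 255 without a division; ((x + 128) * 257) >> 16 gives the
 * correctly rounded result for the 0..255*255 inputs used in 8-bit blending. */
#define DIV255(x) ((((x) + 128) * 257) >> 16)

/* Blend one 8-bit source sample over a destination sample per channel
 * ("over" operator with straight alpha, as APNG_BLEND_OP_OVER uses). */
static uint8_t blend_over(uint8_t src, uint8_t dst, uint8_t alpha)
{
    return DIV255(src * alpha + dst * (255 - alpha));
}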
PNGImageState
PNGImageState
Definition: pngdec.c:45
PNG_FILTER_TYPE_LOCO
#define PNG_FILTER_TYPE_LOCO
Definition: png.h:37
U
#define U(x)
Definition: vp56_arith.h:37
PNGDecContext::pass_row_size
int pass_row_size
Definition: pngdec.c:92
ff_png_pass_row_size
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
Definition: png.c:62
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3040
fail
#define fail()
Definition: checkasm.h:120
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:197
png_filter_row
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
Definition: pngdec.c:251
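As a reminder of what these per-row filters undo, the simplest non-trivial case is the Up filter, which adds the byte directly above. A free-standing version of just that case follows; the real routine dispatches on filter_type and goes through the PNGDSPContext.

#include <stdint.h>

/* PNG "Up" filter reconstruction: each output byte is the filtered byte
 * plus the corresponding byte of the previous, already reconstructed row. */
static void unfilter_up(uint8_t *dst, const uint8_t *src,
                        const uint8_t *prev_row, int row_bytes)
{
    for (int i = 0; i < row_bytes; i++)
        dst[i] = src[i] + prev_row[i];  /* modulo-256 addition */
}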
png_dec_end
static av_cold int png_dec_end(AVCodecContext *avctx)
Definition: pngdec.c:1754
OP_AVG
#define OP_AVG(x, s, l)
PNGDecContext::pic_state
enum PNGImageState pic_state
Definition: pngdec.c:60
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
decode_trns_chunk
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:796
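For palette images a tRNS chunk supplies one alpha byte per palette entry, with missing entries staying opaque. Folding those alphas into 32-bit ARGB palette entries (the AV_PIX_FMT_RGB32-style layout used for PAL8) looks roughly like this sketch; apply_trns_to_palette is an illustrative name.

#include <stdint.h>
#include <stddef.h>

/* Fold tRNS alpha values into an ARGB palette. Entries beyond the tRNS
 * length keep alpha = 0xFF, as the PNG specification requires. */
static void apply_trns_to_palette(uint32_t pal[256],
                                  const uint8_t *trns, size_t trns_len)
{
    for (size_t i = 0; i < 256; i++) {
        uint8_t a = (i < trns_len) ? trns[i] : 0xFF;
        pal[i] = (pal[i] & 0x00FFFFFF) | ((uint32_t)a << 24);
    }
}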
png_pass_dsp_ymask
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
Definition: pngdec.c:103
APNG_DISPOSE_OP_BACKGROUND
@ APNG_DISPOSE_OP_BACKGROUND
Definition: apng.h:32
AVRational::num
int num
Numerator.
Definition: rational.h:59
src
#define src
Definition: vp8dsp.c:254
PNG_COLOR_TYPE_RGB_ALPHA
#define PNG_COLOR_TYPE_RGB_ALPHA
Definition: png.h:34
PNGDecContext::crow_buf
uint8_t * crow_buf
Definition: pngdec.c:82
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
PNGDecContext::last_row
uint8_t * last_row
Definition: pngdec.c:83
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
PNGDecContext::height
int height
Definition: pngdec.c:61
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
pngdsp.h
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
skip_tag
static int skip_tag(AVIOContext *in, int32_t tag_name)
Definition: ismindex.c:132
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:557
YUV2RGB
#define YUV2RGB(NAME, TYPE)
Definition: pngdec.c:308
mask
static const uint16_t mask[17]
Definition: lzw.c:38
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
width
#define width
stereo3d.h
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
PNGDecContext::tmp_row
uint8_t * tmp_row
Definition: pngdec.c:85
s
#define s(width, name)
Definition: cbs_vp9.c:257
PNGDecContext::y_offset
int y_offset
Definition: pngdec.c:64
PNGDecContext::palette
uint32_t palette[256]
Definition: pngdec.c:81
g
const char * g
Definition: vf_curves.c:115
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
decode_fctl_chunk
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:962
png_handle_row
static void png_handle_row(PNGDecContext *s)
Definition: pngdec.c:323
PNG_COLOR_TYPE_RGB
#define PNG_COLOR_TYPE_RGB
Definition: png.h:33
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
PNGDecContext::hdr_state
enum PNGHeaderState hdr_state
Definition: pngdec.c:59
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
f
#define f(width, name)
Definition: cbs_vp9.c:255
pass
#define pass
Definition: fft_template.c:619
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:1575
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
AV_CODEC_ID_PNG
@ AV_CODEC_ID_PNG
Definition: avcodec.h:279
if
if(ret)
Definition: filter_design.txt:179
PNGDecContext
Definition: pngdec.c:50
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:146
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
PNGDecContext::filter_type
int filter_type
Definition: pngdec.c:72
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
png_dec_init
static av_cold int png_dec_init(AVCodecContext *avctx)
Definition: pngdec.c:1726
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2200
apng.h
ff_png_decoder
AVCodec ff_png_decoder
pixel
uint8_t pixel
Definition: tiny_ssim.c:42
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
PNGDecContext::channels
int channels
Definition: pngdec.c:73
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
PNG_COLOR_TYPE_GRAY
#define PNG_COLOR_TYPE_GRAY
Definition: png.h:31
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:964
abs
#define abs(x)
Definition: cuda_runtime.h:35
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
update_thread_context
Frame-threading codec callback that copies the needed decoding state from the previous thread's context into the current one; see doc/multithreading.txt.
c
Definition: undefined.txt:32
ff_png_pass_ymask
const uint8_t ff_png_pass_ymask[NB_PASSES]
Definition: png.c:25
png_pass_mask
static const uint8_t png_pass_mask[NB_PASSES]
Definition: pngdec.c:98
error
static void error(const char *err)
Definition: target_dec_fuzzer.c:61
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
PNGDecContext::pass
int pass
Definition: pngdec.c:89
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
AV_CODEC_ID_LSCR
@ AV_CODEC_ID_LSCR
Definition: avcodec.h:458
handle_small_bpp
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:889
PNG_FILTER_VALUE_NONE
#define PNG_FILTER_VALUE_NONE
Definition: png.h:38
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1965
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
decode_idat_chunk
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length, AVFrame *p)
Definition: pngdec.c:618
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
PNGDecContext::gb
GetByteContext gb
Definition: pngdec.c:54
AVPacket::size
int size
Definition: avcodec.h:1478
av_bprint_get_buffer
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
Definition: bprint.c:218
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
handle_p_frame_png
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1041
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
AVCodecInternal::allocate_progress
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
PNGDecContext::last_w
int last_w
Definition: pngdec.c:63
PNGDecContext::picture
ThreadFrame picture
Definition: pngdec.c:57
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
png_put_interlaced_row
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
Definition: pngdec.c:115
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:212
init_thread_copy
Frame-threading codec callback that re-allocates per-thread writable tables after the codec context has been copied to another thread; see doc/multithreading.txt.
PNG_FILTER_VALUE_AVG
#define PNG_FILTER_VALUE_AVG
Definition: png.h:41
size
int size
Definition: twinvq_data.h:11134
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:203
PNG_FILTER_VALUE_PAETH
#define PNG_FILTER_VALUE_PAETH
Definition: png.h:42
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:368
PNGDecContext::buffer
uint8_t * buffer
Definition: pngdec.c:87
PNG_FILTER_VALUE_UP
#define PNG_FILTER_VALUE_UP
Definition: png.h:40
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
Definition: undefined.txt:41
PNGDecContext::dispose_op
uint8_t dispose_op
Definition: pngdec.c:66
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
offset
Definition: writing_filters.txt:86
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1483
APNG_BLEND_OP_OVER
@ APNG_BLEND_OP_OVER
Definition: apng.h:38
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2631
PNGSIG
#define PNGSIG
Definition: png.h:47
PNGDecContext::last_x_offset
int last_x_offset
Definition: pngdec.c:65
PNGDecContext::buffer_size
int buffer_size
Definition: pngdec.c:88
decode_text_chunk
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed, AVDictionary **dict)
Definition: pngdec.c:502
PNGDecContext::image_linesize
int image_linesize
Definition: pngdec.c:80
ff_pngdsp_init
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
Definition: pngdsp.c:43
av_image_get_linesize
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
Definition: imgutils.c:76
decode_iccp_chunk
static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
Definition: pngdec.c:844
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
in
Definition: audio_convert.c:326
PNGDSPContext::add_bytes_l2
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
Definition: pngdsp.h:28
PNG_FILTER_VALUE_SUB
#define PNG_FILTER_VALUE_SUB
Definition: png.h:39
bprint.h
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
png_decode_idat
static int png_decode_idat(PNGDecContext *s, int length)
Definition: pngdec.c:404
PNGDecContext::x_offset
int x_offset
Definition: pngdec.c:64
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
PNGDecContext::image_buf
uint8_t * image_buf
Definition: pngdec.c:79
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2664
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
ff_png_zalloc
void * ff_png_zalloc(void *opaque, unsigned int items, unsigned int size)
Definition: png.c:39
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
AVCodecContext::height
int height
Definition: avcodec.h:1738
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
PNGDecContext::avctx
AVCodecContext * avctx
Definition: pngdec.c:52
avcodec.h
PNGHeaderState
PNGHeaderState
Definition: pngdec.c:40
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
tag
uint32_t tag
Definition: movenc.c:1496
ret
ret
Definition: filter_design.txt:187
frame
Definition: filter_design.txt:264
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2628
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
PNGDecContext::last_picture
ThreadFrame last_picture
Definition: pngdec.c:56
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
Called by frame-threaded decoders once per-frame setup is complete, allowing the next decoding thread to start.
Definition: pthread_frame.c
APNG_DISPOSE_OP_PREVIOUS
@ APNG_DISPOSE_OP_PREVIOUS
Definition: apng.h:33
PNGDecContext::compression_type
int compression_type
Definition: pngdec.c:70
left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:88
PNG_IHDR
@ PNG_IHDR
Definition: pngdec.c:41
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVFrame::height
int height
Definition: frame.h:353
PNGDecContext::last_dispose_op
uint8_t last_dispose_op
Definition: pngdec.c:67
decode_plte_chunk
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:773
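A PLTE payload holds up to 256 entries of 3 bytes each, and its length must be a multiple of 3. A minimal reader that fills an opaque 0xAARRGGBB palette, as a sketch with an illustrative name rather than the decoder's own routine:

#include <stdint.h>
#include <stddef.h>

/* Read a PLTE payload (3 bytes per entry, at most 256 entries) into an
 * opaque 0xAARRGGBB palette. Returns the number of entries, or -1 if the
 * length is not a multiple of 3 or is too large. */
static int read_plte(uint32_t pal[256], const uint8_t *buf, size_t len)
{
    size_t n = len / 3;

    if (len % 3 || n > 256)
        return -1;
    for (size_t i = 0; i < n; i++)
        pal[i] = 0xFF000000u |
                 ((uint32_t)buf[3 * i]     << 16) |
                 ((uint32_t)buf[3 * i + 1] <<  8) |
                  buf[3 * i + 2];
    return (int)n;
}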
ThreadFrame
Definition: thread.h:34
ff_lscr_decoder
AVCodec ff_lscr_decoder
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
APNG_BLEND_OP_SOURCE
@ APNG_BLEND_OP_SOURCE
Definition: apng.h:37
buffer
Definition: filter_design.txt:49
UNROLL_FILTER
#define UNROLL_FILTER(op)
Definition: pngdec.c:236
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
PNGDecContext::transparent_color_be
uint8_t transparent_color_be[6]
Definition: pngdec.c:77
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:581
av_fast_padded_mallocz
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_padded_malloc(), except that the buffer will always be 0-initialized after the call.
Definition: utils.c:82
png_pass_dsp_mask
static const uint8_t png_pass_dsp_mask[NB_PASSES]
Definition: pngdec.c:108
decode_phys_chunk
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
Definition: pngdec.c:602
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:2650
PNG_IDAT
@ PNG_IDAT
Definition: pngdec.c:46
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1861
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
mastering_display_metadata.h
PNGDecContext::dsp
PNGDSPContext dsp
Definition: pngdec.c:51
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:201
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1590
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
png.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
PNG_COLOR_TYPE_GRAY_ALPHA
#define PNG_COLOR_TYPE_GRAY_ALPHA
Definition: png.h:35
AVFrameSideData::metadata
AVDictionary * metadata
Definition: frame.h:205
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
MNGSIG
#define MNGSIG
Definition: png.h:48
PNGDecContext::interlace_type
int interlace_type
Definition: pngdec.c:71
decode_frame_common
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt)
Definition: pngdec.c:1174
h
h
Definition: vp9dsp_template.c:2038
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
handle_p_frame_apng
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1061
PNGDecContext::y
int y
Definition: pngdec.c:93
ff_apng_decoder
AVCodec ff_apng_decoder
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
OP_SUB
#define OP_SUB(x, s, l)
PNGDecContext::cur_w
int cur_w
Definition: pngdec.c:62
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:1944
PNG_COLOR_TYPE_PALETTE
#define PNG_COLOR_TYPE_PALETTE
Definition: png.h:32
AV_DICT_DONT_STRDUP_KEY
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that's been allocated with av_malloc() or another memory allocation function.
Definition: dict.h:72
PNGDecContext::bpp
int bpp
Definition: pngdec.c:75
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
PNGDecContext::has_trns
int has_trns
Definition: pngdec.c:76
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c