pngdec.c
1 /*
2  * PNG image format
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 //#define DEBUG
23 
24 #include "libavutil/avassert.h"
25 #include "libavutil/bprint.h"
26 #include "libavutil/imgutils.h"
27 #include "libavutil/stereo3d.h"
28 #include "libavutil/mastering_display_metadata.h"
29 
30 #include "avcodec.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 #include "apng.h"
34 #include "png.h"
35 #include "pngdsp.h"
36 #include "thread.h"
37 
38 #include <zlib.h>
39 
40 enum PNGHeaderState {
41  PNG_IHDR = 1 << 0,
42  PNG_PLTE = 1 << 1,
43 };
44 
45 enum PNGImageState {
46  PNG_IDAT = 1 << 0,
47  PNG_ALLIMAGE = 1 << 1,
48 };
49 
50 typedef struct PNGDecContext {
51  PNGDSPContext dsp;
52  AVCodecContext *avctx;
53 
54  GetByteContext gb;
55  ThreadFrame previous_picture;
56  ThreadFrame last_picture;
57  ThreadFrame picture;
58 
59  enum PNGHeaderState hdr_state;
60  enum PNGImageState pic_state;
61  int width, height;
62  int cur_w, cur_h;
63  int last_w, last_h;
64  int x_offset, y_offset;
65  int last_x_offset, last_y_offset;
66  uint8_t dispose_op, blend_op;
67  uint8_t last_dispose_op;
68  int bit_depth;
69  int color_type;
70  int compression_type;
71  int interlace_type;
72  int filter_type;
73  int channels;
74  int bits_per_pixel;
75  int bpp;
76  int has_trns;
77  uint8_t transparent_color_be[6];
78 
79  uint8_t *image_buf;
80  int image_linesize;
81  uint32_t palette[256];
82  uint8_t *crow_buf;
83  uint8_t *last_row;
84  unsigned int last_row_size;
85  uint8_t *tmp_row;
86  unsigned int tmp_row_size;
87  uint8_t *buffer;
88  int buffer_size;
89  int pass;
90  int crow_size; /* compressed row size (include filter type) */
91  int row_size; /* decompressed row size */
92  int pass_row_size; /* decompress row size of the current pass */
93  int y;
94  z_stream zstream;
95 } PNGDecContext;
96 
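/* Note on the pass tables below (added for reference): PNG Adam7 interlacing
 * sends the image in 7 passes over an 8x8 pixel grid, each pass adding pixels
 * at fixed positions inside every 8x8 tile. The tables are indexed by pass:
 * png_pass_mask selects which x positions (mod 8) carry source pixels in a
 * pass, png_pass_dsp_ymask which rows a pass may display into, and
 * png_pass_dsp_mask which x positions are (over)written so a partially
 * decoded image still looks reasonable. */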
97 /* Mask to determine which pixels are valid in a pass */
98 static const uint8_t png_pass_mask[NB_PASSES] = {
99  0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
100 };
101 
102 /* Mask to determine which y pixels can be written in a pass */
103 static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
104  0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
105 };
106 
107 /* Mask to determine which pixels to overwrite while displaying */
108 static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
109  0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
110 };
111 
112 /* NOTE: we try to construct a good looking image at each pass. width
113  * is the original image width. We also do pixel format conversion at
114  * this stage */
115 static void png_put_interlaced_row(uint8_t *dst, int width,
116  int bits_per_pixel, int pass,
117  int color_type, const uint8_t *src)
118 {
119  int x, mask, dsp_mask, j, src_x, b, bpp;
120  uint8_t *d;
121  const uint8_t *s;
122 
123  mask = png_pass_mask[pass];
124  dsp_mask = png_pass_dsp_mask[pass];
125 
126  switch (bits_per_pixel) {
127  case 1:
128  src_x = 0;
129  for (x = 0; x < width; x++) {
130  j = (x & 7);
131  if ((dsp_mask << j) & 0x80) {
132  b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
133  dst[x >> 3] &= 0xFF7F>>j;
134  dst[x >> 3] |= b << (7 - j);
135  }
136  if ((mask << j) & 0x80)
137  src_x++;
138  }
139  break;
140  case 2:
141  src_x = 0;
142  for (x = 0; x < width; x++) {
143  int j2 = 2 * (x & 3);
144  j = (x & 7);
145  if ((dsp_mask << j) & 0x80) {
146  b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
147  dst[x >> 2] &= 0xFF3F>>j2;
148  dst[x >> 2] |= b << (6 - j2);
149  }
150  if ((mask << j) & 0x80)
151  src_x++;
152  }
153  break;
154  case 4:
155  src_x = 0;
156  for (x = 0; x < width; x++) {
157  int j2 = 4*(x&1);
158  j = (x & 7);
159  if ((dsp_mask << j) & 0x80) {
160  b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
161  dst[x >> 1] &= 0xFF0F>>j2;
162  dst[x >> 1] |= b << (4 - j2);
163  }
164  if ((mask << j) & 0x80)
165  src_x++;
166  }
167  break;
168  default:
169  bpp = bits_per_pixel >> 3;
170  d = dst;
171  s = src;
172  for (x = 0; x < width; x++) {
173  j = x & 7;
174  if ((dsp_mask << j) & 0x80) {
175  memcpy(d, s, bpp);
176  }
177  d += bpp;
178  if ((mask << j) & 0x80)
179  s += bpp;
180  }
181  break;
182  }
183 }
184 
185 void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
186  int w, int bpp)
187 {
188  int i;
189  for (i = 0; i < w; i++) {
190  int a, b, c, p, pa, pb, pc;
191 
192  a = dst[i - bpp];
193  b = top[i];
194  c = top[i - bpp];
195 
196  p = b - c;
197  pc = a - c;
198 
199  pa = abs(p);
200  pb = abs(pc);
201  pc = abs(p + pc);
202 
203  if (pa <= pb && pa <= pc)
204  p = a;
205  else if (pb <= pc)
206  p = b;
207  else
208  p = c;
209  dst[i] = p + src[i];
210  }
211 }
212 
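/* The UNROLL1/UNROLL_FILTER helpers below keep one running value per channel
 * (r, g, b, a) in locals while walking the row, so each output byte depends
 * only on the previous pixel of the same channel; the trailing loop in
 * UNROLL_FILTER covers bpp values the unrolled cases do not handle. */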
213 #define UNROLL1(bpp, op) \
214  { \
215  r = dst[0]; \
216  if (bpp >= 2) \
217  g = dst[1]; \
218  if (bpp >= 3) \
219  b = dst[2]; \
220  if (bpp >= 4) \
221  a = dst[3]; \
222  for (; i <= size - bpp; i += bpp) { \
223  dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
224  if (bpp == 1) \
225  continue; \
226  dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
227  if (bpp == 2) \
228  continue; \
229  dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
230  if (bpp == 3) \
231  continue; \
232  dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
233  } \
234  }
235 
236 #define UNROLL_FILTER(op) \
237  if (bpp == 1) { \
238  UNROLL1(1, op) \
239  } else if (bpp == 2) { \
240  UNROLL1(2, op) \
241  } else if (bpp == 3) { \
242  UNROLL1(3, op) \
243  } else if (bpp == 4) { \
244  UNROLL1(4, op) \
245  } \
246  for (; i < size; i++) { \
247  dst[i] = op(dst[i - bpp], src[i], last[i]); \
248  }
249 
250 /* NOTE: 'dst' can be equal to 'last' */
251 static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
252  uint8_t *src, uint8_t *last, int size, int bpp)
253 {
254  int i, p, r, g, b, a;
255 
256  switch (filter_type) {
257  case PNG_FILTER_VALUE_NONE:
258  memcpy(dst, src, size);
259  break;
260  case PNG_FILTER_VALUE_SUB:
261  for (i = 0; i < bpp; i++)
262  dst[i] = src[i];
263  if (bpp == 4) {
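 /* For 4 bytes per pixel the SUB filter is applied one 32-bit word at a time:
  * the low 7 bits of every byte are added and the top bits are patched back
  * in with XOR, so carries never cross byte boundaries (a SWAR-style packed
  * byte addition). */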
264  p = *(int *)dst;
265  for (; i < size; i += bpp) {
266  unsigned s = *(int *)(src + i);
267  p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
268  *(int *)(dst + i) = p;
269  }
270  } else {
271 #define OP_SUB(x, s, l) ((x) + (s))
272  UNROLL_FILTER(OP_SUB);
273  }
274  break;
275  case PNG_FILTER_VALUE_UP:
276  dsp->add_bytes_l2(dst, src, last, size);
277  break;
278  case PNG_FILTER_VALUE_AVG:
279  for (i = 0; i < bpp; i++) {
280  p = (last[i] >> 1);
281  dst[i] = p + src[i];
282  }
283 #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
284  UNROLL_FILTER(OP_AVG);
285  break;
286  case PNG_FILTER_VALUE_PAETH:
287  for (i = 0; i < bpp; i++) {
288  p = last[i];
289  dst[i] = p + src[i];
290  }
291  if (bpp > 2 && size > 4) {
292  /* would write off the end of the array if we let it process
293  * the last pixel with bpp=3 */
294  int w = (bpp & 3) ? size - 3 : size;
295 
296  if (w > i) {
297  dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
298  i = w;
299  }
300  }
301  ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
302  break;
303  }
304 }
305 
306 /* This used to be called "deloco" in FFmpeg
307  * and is actually an inverse reversible colorspace transformation */
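/* The LOCO (MNG) filter type stores R and B as differences from G, a simple
 * reversible colour transform; undoing it just adds G back to the other two
 * channels, which is what the deloco_* functions generated below do. */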
308 #define YUV2RGB(NAME, TYPE) \
309 static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
310 { \
311  int i; \
312  for (i = 0; i < size; i += 3 + alpha) { \
313  int g = dst [i + 1]; \
314  dst[i + 0] += g; \
315  dst[i + 2] += g; \
316  } \
317 }
318 
319 YUV2RGB(rgb8, uint8_t)
320 YUV2RGB(rgb16, uint16_t)
321 
322 /* process exactly one decompressed row */
323 static void png_handle_row(PNGDecContext *s)
324 {
325  uint8_t *ptr, *last_row;
326  int got_line;
327 
328  if (!s->interlace_type) {
329  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
330  if (s->y == 0)
331  last_row = s->last_row;
332  else
333  last_row = ptr - s->image_linesize;
334 
335  png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
336  last_row, s->row_size, s->bpp);
337  /* loco lags by 1 row so that it doesn't interfere with top prediction */
338  if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
339  if (s->bit_depth == 16) {
340  deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
341  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
342  } else {
343  deloco_rgb8(ptr - s->image_linesize, s->row_size,
344  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
345  }
346  }
347  s->y++;
348  if (s->y == s->cur_h) {
349  s->pic_state |= PNG_ALLIMAGE;
350  if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
351  if (s->bit_depth == 16) {
352  deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
353  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
354  } else {
355  deloco_rgb8(ptr, s->row_size,
356  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
357  }
358  }
359  }
360  } else {
361  got_line = 0;
362  for (;;) {
363  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
364  if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
365  /* if we already read one row, it is time to stop to
366  * wait for the next one */
367  if (got_line)
368  break;
369  png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
370  s->last_row, s->pass_row_size, s->bpp);
371  FFSWAP(uint8_t *, s->last_row, s->tmp_row);
372  FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
373  got_line = 1;
374  }
375  if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
376  png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
377  s->color_type, s->last_row);
378  }
379  s->y++;
380  if (s->y == s->cur_h) {
381  memset(s->last_row, 0, s->row_size);
382  for (;;) {
383  if (s->pass == NB_PASSES - 1) {
384  s->pic_state |= PNG_ALLIMAGE;
385  goto the_end;
386  } else {
387  s->pass++;
388  s->y = 0;
389  s->pass_row_size = ff_png_pass_row_size(s->pass,
390  s->bits_per_pixel,
391  s->cur_w);
392  s->crow_size = s->pass_row_size + 1;
393  if (s->pass_row_size != 0)
394  break;
395  /* skip pass if empty row */
396  }
397  }
398  }
399  }
400 the_end:;
401  }
402 }
403 
404 static int png_decode_idat(PNGDecContext *s, int length)
405 {
406  int ret;
407  s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
408  s->zstream.next_in = (unsigned char *)s->gb.buffer;
409  bytestream2_skip(&s->gb, length);
410 
411  /* decode one line if possible */
412  while (s->zstream.avail_in > 0) {
413  ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
414  if (ret != Z_OK && ret != Z_STREAM_END) {
415  av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
416  return AVERROR_EXTERNAL;
417  }
418  if (s->zstream.avail_out == 0) {
419  if (!(s->pic_state & PNG_ALLIMAGE)) {
420  png_handle_row(s);
421  }
422  s->zstream.avail_out = s->crow_size;
423  s->zstream.next_out = s->crow_buf;
424  }
425  if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
426  av_log(NULL, AV_LOG_WARNING,
427  "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
428  return 0;
429  }
430  }
431  return 0;
432 }
433 
434 static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
435  const uint8_t *data_end)
436 {
437  z_stream zstream;
438  unsigned char *buf;
439  unsigned buf_size;
440  int ret;
441 
442  zstream.zalloc = ff_png_zalloc;
443  zstream.zfree = ff_png_zfree;
444  zstream.opaque = NULL;
445  if (inflateInit(&zstream) != Z_OK)
446  return AVERROR_EXTERNAL;
447  zstream.next_in = (unsigned char *)data;
448  zstream.avail_in = data_end - data;
449  av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);
450 
451  while (zstream.avail_in > 0) {
452  av_bprint_get_buffer(bp, 2, &buf, &buf_size);
453  if (buf_size < 2) {
454  ret = AVERROR(ENOMEM);
455  goto fail;
456  }
457  zstream.next_out = buf;
458  zstream.avail_out = buf_size - 1;
459  ret = inflate(&zstream, Z_PARTIAL_FLUSH);
460  if (ret != Z_OK && ret != Z_STREAM_END) {
461  ret = AVERROR_EXTERNAL;
462  goto fail;
463  }
464  bp->len += zstream.next_out - buf;
465  if (ret == Z_STREAM_END)
466  break;
467  }
468  inflateEnd(&zstream);
469  bp->str[bp->len] = 0;
470  return 0;
471 
472 fail:
473  inflateEnd(&zstream);
474  av_bprint_finalize(bp, NULL);
475  return ret;
476 }
477 
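/* PNG tEXt/zTXt strings are Latin-1 (ISO 8859-1); every byte >= 0x80 expands
 * to a two-byte UTF-8 sequence, so the helper below first counts those bytes
 * before allocating the converted string. */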
478 static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
479 {
480  size_t extra = 0, i;
481  uint8_t *out, *q;
482 
483  for (i = 0; i < size_in; i++)
484  extra += in[i] >= 0x80;
485  if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
486  return NULL;
487  q = out = av_malloc(size_in + extra + 1);
488  if (!out)
489  return NULL;
490  for (i = 0; i < size_in; i++) {
491  if (in[i] >= 0x80) {
492  *(q++) = 0xC0 | (in[i] >> 6);
493  *(q++) = 0x80 | (in[i] & 0x3F);
494  } else {
495  *(q++) = in[i];
496  }
497  }
498  *(q++) = 0;
499  return out;
500 }
501 
502 static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
503  AVDictionary **dict)
504 {
505  int ret, method;
506  const uint8_t *data = s->gb.buffer;
507  const uint8_t *data_end = data + length;
508  const uint8_t *keyword = data;
509  const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
510  uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
511  unsigned text_len;
512  AVBPrint bp;
513 
514  if (!keyword_end)
515  return AVERROR_INVALIDDATA;
516  data = keyword_end + 1;
517 
518  if (compressed) {
519  if (data == data_end)
520  return AVERROR_INVALIDDATA;
521  method = *(data++);
522  if (method)
523  return AVERROR_INVALIDDATA;
524  if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
525  return ret;
526  text_len = bp.len;
527  ret = av_bprint_finalize(&bp, (char **)&text);
528  if (ret < 0)
529  return ret;
530  } else {
531  text = (uint8_t *)data;
532  text_len = data_end - text;
533  }
534 
535  kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
536  txt_utf8 = iso88591_to_utf8(text, text_len);
537  if (text != data)
538  av_free(text);
539  if (!(kw_utf8 && txt_utf8)) {
540  av_free(kw_utf8);
541  av_free(txt_utf8);
542  return AVERROR(ENOMEM);
543  }
544 
545  av_dict_set(dict, kw_utf8, txt_utf8,
546  AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
547  return 0;
548 }
549 
550 static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
551  uint32_t length)
552 {
553  if (length != 13)
554  return AVERROR_INVALIDDATA;
555 
556  if (s->pic_state & PNG_IDAT) {
557  av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
558  return AVERROR_INVALIDDATA;
559  }
560 
561  if (s->hdr_state & PNG_IHDR) {
562  av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
563  return AVERROR_INVALIDDATA;
564  }
565 
566  s->width = s->cur_w = bytestream2_get_be32(&s->gb);
567  s->height = s->cur_h = bytestream2_get_be32(&s->gb);
568  if (av_image_check_size(s->width, s->height, 0, avctx)) {
569  s->cur_w = s->cur_h = s->width = s->height = 0;
570  av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
571  return AVERROR_INVALIDDATA;
572  }
573  s->bit_depth = bytestream2_get_byte(&s->gb);
574  if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
575  s->bit_depth != 8 && s->bit_depth != 16) {
576  av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
577  goto error;
578  }
579  s->color_type = bytestream2_get_byte(&s->gb);
580  s->compression_type = bytestream2_get_byte(&s->gb);
581  if (s->compression_type) {
582  av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n", s->compression_type);
583  goto error;
584  }
585  s->filter_type = bytestream2_get_byte(&s->gb);
586  s->interlace_type = bytestream2_get_byte(&s->gb);
587  bytestream2_skip(&s->gb, 4); /* crc */
588  s->hdr_state |= PNG_IHDR;
589  if (avctx->debug & FF_DEBUG_PICT_INFO)
590  av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
591  "compression_type=%d filter_type=%d interlace_type=%d\n",
592  s->width, s->height, s->bit_depth, s->color_type,
593  s->compression_type, s->filter_type, s->interlace_type);
594 
595  return 0;
596 error:
597  s->cur_w = s->cur_h = s->width = s->height = 0;
598  s->bit_depth = 8;
599  return AVERROR_INVALIDDATA;
600 }
601 
602 static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
603 {
604  if (s->pic_state & PNG_IDAT) {
605  av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
606  return AVERROR_INVALIDDATA;
607  }
608  avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
609  avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
610  if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
611  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
612  bytestream2_skip(&s->gb, 1); /* unit specifier */
613  bytestream2_skip(&s->gb, 4); /* crc */
614 
615  return 0;
616 }
617 
618 static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
619  uint32_t length, AVFrame *p)
620 {
621  int ret;
622  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
623 
624  if (!(s->hdr_state & PNG_IHDR)) {
625  av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
626  return AVERROR_INVALIDDATA;
627  }
628  if (!(s->pic_state & PNG_IDAT)) {
629  /* init image info */
630  ret = ff_set_dimensions(avctx, s->width, s->height);
631  if (ret < 0)
632  return ret;
633 
634  s->channels = ff_png_get_nb_channels(s->color_type);
635  s->bits_per_pixel = s->bit_depth * s->channels;
636  s->bpp = (s->bits_per_pixel + 7) >> 3;
637  s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;
638 
639  if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
640  s->color_type == PNG_COLOR_TYPE_RGB) {
641  avctx->pix_fmt = AV_PIX_FMT_RGB24;
642  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
643  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
644  avctx->pix_fmt = AV_PIX_FMT_RGBA;
645  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
646  s->color_type == PNG_COLOR_TYPE_GRAY) {
647  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
648  } else if (s->bit_depth == 16 &&
649  s->color_type == PNG_COLOR_TYPE_GRAY) {
650  avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
651  } else if (s->bit_depth == 16 &&
652  s->color_type == PNG_COLOR_TYPE_RGB) {
653  avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
654  } else if (s->bit_depth == 16 &&
655  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
656  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
657  } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
658  s->color_type == PNG_COLOR_TYPE_PALETTE) {
659  avctx->pix_fmt = AV_PIX_FMT_PAL8;
660  } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
661  avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
662  } else if (s->bit_depth == 8 &&
663  s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
664  avctx->pix_fmt = AV_PIX_FMT_YA8;
665  } else if (s->bit_depth == 16 &&
666  s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
667  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
668  } else {
669  avpriv_report_missing_feature(avctx,
670  "Bit depth %d color type %d",
671  s->bit_depth, s->color_type);
672  return AVERROR_PATCHWELCOME;
673  }
674 
675  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
676  switch (avctx->pix_fmt) {
677  case AV_PIX_FMT_RGB24:
678  avctx->pix_fmt = AV_PIX_FMT_RGBA;
679  break;
680 
681  case AV_PIX_FMT_RGB48BE:
682  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
683  break;
684 
685  case AV_PIX_FMT_GRAY8:
686  avctx->pix_fmt = AV_PIX_FMT_YA8;
687  break;
688 
689  case AV_PIX_FMT_GRAY16BE:
690  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
691  break;
692 
693  default:
694  avpriv_request_sample(avctx, "bit depth %d "
695  "and color type %d with TRNS",
696  s->bit_depth, s->color_type);
697  return AVERROR_INVALIDDATA;
698  }
699 
700  s->bpp += byte_depth;
701  }
702 
703  if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
704  return ret;
705  if (avctx->codec_id == AV_CODEC_ID_APNG && s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
706  ff_thread_release_buffer(avctx, &s->previous_picture);
707  if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
708  return ret;
709  }
710  p->pict_type = AV_PICTURE_TYPE_I;
711  p->key_frame = 1;
712  p->interlaced_frame = !!s->interlace_type;
713 
714  ff_thread_finish_setup(avctx);
715 
716  /* compute the compressed row size */
717  if (!s->interlace_type) {
718  s->crow_size = s->row_size + 1;
719  } else {
720  s->pass = 0;
721  s->pass_row_size = ff_png_pass_row_size(s->pass,
722  s->bits_per_pixel,
723  s->cur_w);
724  s->crow_size = s->pass_row_size + 1;
725  }
726  ff_dlog(avctx, "row_size=%d crow_size =%d\n",
727  s->row_size, s->crow_size);
728  s->image_buf = p->data[0];
729  s->image_linesize = p->linesize[0];
730  /* copy the palette if needed */
731  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
732  memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
733  /* empty row is used if differencing to the first row */
734  av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size + 16);
735  if (!s->last_row)
736  return AVERROR_INVALIDDATA;
737  if (s->interlace_type ||
738  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
739  av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size + 16);
740  if (!s->tmp_row)
741  return AVERROR_INVALIDDATA;
742  }
743  /* compressed row */
744  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
745  if (!s->buffer)
746  return AVERROR(ENOMEM);
747 
748  /* we want crow_buf+1 to be 16-byte aligned */
749  s->crow_buf = s->buffer + 15;
750  s->zstream.avail_out = s->crow_size;
751  s->zstream.next_out = s->crow_buf;
752  }
753 
754  s->pic_state |= PNG_IDAT;
755 
756  /* set image to non-transparent bpp while decompressing */
757  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
758  s->bpp -= byte_depth;
759 
760  ret = png_decode_idat(s, length);
761 
762  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
763  s->bpp += byte_depth;
764 
765  if (ret < 0)
766  return ret;
767 
768  bytestream2_skip(&s->gb, 4); /* crc */
769 
770  return 0;
771 }
772 
773 static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
774  uint32_t length)
775 {
776  int n, i, r, g, b;
777 
778  if ((length % 3) != 0 || length > 256 * 3)
779  return AVERROR_INVALIDDATA;
780  /* read the palette */
781  n = length / 3;
782  for (i = 0; i < n; i++) {
783  r = bytestream2_get_byte(&s->gb);
784  g = bytestream2_get_byte(&s->gb);
785  b = bytestream2_get_byte(&s->gb);
786  s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
787  }
788  for (; i < 256; i++)
789  s->palette[i] = (0xFFU << 24);
790  s->hdr_state |= PNG_PLTE;
791  bytestream2_skip(&s->gb, 4); /* crc */
792 
793  return 0;
794 }
795 
796 static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
797  uint32_t length)
798 {
799  int v, i;
800 
801  if (!(s->hdr_state & PNG_IHDR)) {
802  av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
803  return AVERROR_INVALIDDATA;
804  }
805 
806  if (s->pic_state & PNG_IDAT) {
807  av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
808  return AVERROR_INVALIDDATA;
809  }
810 
811  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
812  if (length > 256 || !(s->hdr_state & PNG_PLTE))
813  return AVERROR_INVALIDDATA;
814 
815  for (i = 0; i < length; i++) {
816  unsigned v = bytestream2_get_byte(&s->gb);
817  s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
818  }
819  } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
820  if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
821  (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
822  s->bit_depth == 1)
823  return AVERROR_INVALIDDATA;
824 
825  for (i = 0; i < length / 2; i++) {
826  /* only use the least significant bits */
827  v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
828 
829  if (s->bit_depth > 8)
830  AV_WB16(&s->transparent_color_be[2 * i], v);
831  else
832  s->transparent_color_be[i] = v;
833  }
834  } else {
835  return AVERROR_INVALIDDATA;
836  }
837 
838  bytestream2_skip(&s->gb, 4); /* crc */
839  s->has_trns = 1;
840 
841  return 0;
842 }
843 
844 static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
845 {
846  int ret, cnt = 0;
847  uint8_t *data, profile_name[82];
848  AVBPrint bp;
849  AVFrameSideData *sd;
850 
851  while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
852  if (cnt > 80) {
853  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
854  return AVERROR_INVALIDDATA;
855  }
856 
857  length = FFMAX(length - cnt, 0);
858 
859  if (bytestream2_get_byte(&s->gb) != 0) {
860  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
861  return AVERROR_INVALIDDATA;
862  }
863 
864  length = FFMAX(length - 1, 0);
865 
866  if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
867  return ret;
868 
869  ret = av_bprint_finalize(&bp, (char **)&data);
870  if (ret < 0)
871  return ret;
872 
873  sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len);
874  if (!sd) {
875  av_free(data);
876  return AVERROR(ENOMEM);
877  }
878 
879  av_dict_set(&sd->metadata, "name", profile_name, 0);
880  memcpy(sd->data, data, bp.len);
881  av_free(data);
882 
883  /* ICC compressed data and CRC */
884  bytestream2_skip(&s->gb, length + 4);
885 
886  return 0;
887 }
888 
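/* Rows with 1, 2 or 4 bits per pixel are decoded in packed form; the helper
 * below expands them in place to one byte per pixel, walking each row from
 * right to left so packed input bytes are not overwritten before being read,
 * and scaling grayscale samples (the 0x55/0x11 factors) to the 8-bit range. */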
889 static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
890 {
891  if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
892  int i, j, k;
893  uint8_t *pd = p->data[0];
894  for (j = 0; j < s->height; j++) {
895  i = s->width / 8;
896  for (k = 7; k >= 1; k--)
897  if ((s->width&7) >= k)
898  pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
899  for (i--; i >= 0; i--) {
900  pd[8*i + 7]= pd[i] & 1;
901  pd[8*i + 6]= (pd[i]>>1) & 1;
902  pd[8*i + 5]= (pd[i]>>2) & 1;
903  pd[8*i + 4]= (pd[i]>>3) & 1;
904  pd[8*i + 3]= (pd[i]>>4) & 1;
905  pd[8*i + 2]= (pd[i]>>5) & 1;
906  pd[8*i + 1]= (pd[i]>>6) & 1;
907  pd[8*i + 0]= pd[i]>>7;
908  }
909  pd += s->image_linesize;
910  }
911  } else if (s->bits_per_pixel == 2) {
912  int i, j;
913  uint8_t *pd = p->data[0];
914  for (j = 0; j < s->height; j++) {
915  i = s->width / 4;
916  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
917  if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
918  if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
919  if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
920  for (i--; i >= 0; i--) {
921  pd[4*i + 3]= pd[i] & 3;
922  pd[4*i + 2]= (pd[i]>>2) & 3;
923  pd[4*i + 1]= (pd[i]>>4) & 3;
924  pd[4*i + 0]= pd[i]>>6;
925  }
926  } else {
927  if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
928  if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
929  if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
930  for (i--; i >= 0; i--) {
931  pd[4*i + 3]= ( pd[i] & 3)*0x55;
932  pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
933  pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
934  pd[4*i + 0]= ( pd[i]>>6 )*0x55;
935  }
936  }
937  pd += s->image_linesize;
938  }
939  } else if (s->bits_per_pixel == 4) {
940  int i, j;
941  uint8_t *pd = p->data[0];
942  for (j = 0; j < s->height; j++) {
943  i = s->width/2;
944  if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
945  if (s->width&1) pd[2*i+0]= pd[i]>>4;
946  for (i--; i >= 0; i--) {
947  pd[2*i + 1] = pd[i] & 15;
948  pd[2*i + 0] = pd[i] >> 4;
949  }
950  } else {
951  if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
952  for (i--; i >= 0; i--) {
953  pd[2*i + 1] = (pd[i] & 15) * 0x11;
954  pd[2*i + 0] = (pd[i] >> 4) * 0x11;
955  }
956  }
957  pd += s->image_linesize;
958  }
959  }
960 }
961 
962 static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
963  uint32_t length)
964 {
965  uint32_t sequence_number;
966  int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;
967 
968  if (length != 26)
969  return AVERROR_INVALIDDATA;
970 
971  if (!(s->hdr_state & PNG_IHDR)) {
972  av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
973  return AVERROR_INVALIDDATA;
974  }
975 
976  s->last_w = s->cur_w;
977  s->last_h = s->cur_h;
978  s->last_x_offset = s->x_offset;
979  s->last_y_offset = s->y_offset;
980  s->last_dispose_op = s->dispose_op;
981 
982  sequence_number = bytestream2_get_be32(&s->gb);
983  cur_w = bytestream2_get_be32(&s->gb);
984  cur_h = bytestream2_get_be32(&s->gb);
985  x_offset = bytestream2_get_be32(&s->gb);
986  y_offset = bytestream2_get_be32(&s->gb);
987  bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
988  dispose_op = bytestream2_get_byte(&s->gb);
989  blend_op = bytestream2_get_byte(&s->gb);
990  bytestream2_skip(&s->gb, 4); /* crc */
991 
992  if (sequence_number == 0 &&
993  (cur_w != s->width ||
994  cur_h != s->height ||
995  x_offset != 0 ||
996  y_offset != 0) ||
997  cur_w <= 0 || cur_h <= 0 ||
998  x_offset < 0 || y_offset < 0 ||
999  cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
1000  return AVERROR_INVALIDDATA;
1001 
1002  if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
1003  av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
1004  return AVERROR_INVALIDDATA;
1005  }
1006 
1007  if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
1008  dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
1009  // No previous frame to revert to for the first frame
1010  // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND
1011  dispose_op = APNG_DISPOSE_OP_BACKGROUND;
1012  }
1013 
1014  if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
1015  avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
1016  avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1017  avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
1018  avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
1019  avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
1020  avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
1021  )) {
1022  // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
1023  blend_op = APNG_BLEND_OP_SOURCE;
1024  }
1025 
1026  s->cur_w = cur_w;
1027  s->cur_h = cur_h;
1028  s->x_offset = x_offset;
1029  s->y_offset = y_offset;
1030  s->dispose_op = dispose_op;
1031  s->blend_op = blend_op;
1032 
1033  return 0;
1034 }
1035 
1036 static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
1037 {
1038  int i, j;
1039  uint8_t *pd = p->data[0];
1040  uint8_t *pd_last = s->last_picture.f->data[0];
1041  int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
1042 
1043  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1044  for (j = 0; j < s->height; j++) {
1045  for (i = 0; i < ls; i++)
1046  pd[i] += pd_last[i];
1047  pd += s->image_linesize;
1048  pd_last += s->image_linesize;
1049  }
1050 }
1051 
1052 // divide by 255 and round to nearest
1053 // apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
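// ((x + 128) * 257) >> 16 gives the exact rounded quotient x/255 for
// 0 <= x <= 255*255, the largest value the blending code below feeds into it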
1054 #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
1055 
1056 static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
1057  AVFrame *p)
1058 {
1059  size_t x, y;
1060  uint8_t *buffer;
1061 
1062  if (s->blend_op == APNG_BLEND_OP_OVER &&
1063  avctx->pix_fmt != AV_PIX_FMT_RGBA &&
1064  avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
1065  avctx->pix_fmt != AV_PIX_FMT_PAL8) {
1066  avpriv_request_sample(avctx, "Blending with pixel format %s",
1067  av_get_pix_fmt_name(avctx->pix_fmt));
1068  return AVERROR_PATCHWELCOME;
1069  }
1070 
1071  buffer = av_malloc_array(s->image_linesize, s->height);
1072  if (!buffer)
1073  return AVERROR(ENOMEM);
1074 
1075 
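 // APNG dispose ops: OP_NONE keeps the canvas as the previous frame left it,
 // OP_BACKGROUND clears the previous frame's region to transparent black, and
 // OP_PREVIOUS reverts to the canvas saved in previous_picture.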
1076  // Do the disposal operation specified by the last frame on the frame
1077  if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
1078  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1079  memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);
1080 
1081  if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND)
1082  for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
1083  memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
1084 
1085  memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
1086  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1087  } else {
1088  ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
1089  memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
1090  }
1091 
1092  // Perform blending
1093  if (s->blend_op == APNG_BLEND_OP_SOURCE) {
1094  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1095  size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
1096  memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
1097  }
1098  } else { // APNG_BLEND_OP_OVER
1099  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1100  uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
1101  uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
1102  for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
1103  size_t b;
1104  uint8_t foreground_alpha, background_alpha, output_alpha;
1105  uint8_t output[10];
1106 
1107  // Since we might be blending alpha onto alpha, we use the following equations:
1108  // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
1109  // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
1110 
1111  switch (avctx->pix_fmt) {
1112  case AV_PIX_FMT_RGBA:
1113  foreground_alpha = foreground[3];
1114  background_alpha = background[3];
1115  break;
1116 
1117  case AV_PIX_FMT_GRAY8A:
1118  foreground_alpha = foreground[1];
1119  background_alpha = background[1];
1120  break;
1121 
1122  case AV_PIX_FMT_PAL8:
1123  foreground_alpha = s->palette[foreground[0]] >> 24;
1124  background_alpha = s->palette[background[0]] >> 24;
1125  break;
1126  }
1127 
1128  if (foreground_alpha == 0)
1129  continue;
1130 
1131  if (foreground_alpha == 255) {
1132  memcpy(background, foreground, s->bpp);
1133  continue;
1134  }
1135 
1136  if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1137  // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
1138  avpriv_request_sample(avctx, "Alpha blending palette samples");
1139  background[0] = foreground[0];
1140  continue;
1141  }
1142 
1143  output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);
1144 
1145  av_assert0(s->bpp <= 10);
1146 
1147  for (b = 0; b < s->bpp - 1; ++b) {
1148  if (output_alpha == 0) {
1149  output[b] = 0;
1150  } else if (background_alpha == 255) {
1151  output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
1152  } else {
1153  output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
1154  }
1155  }
1156  output[b] = output_alpha;
1157  memcpy(background, output, s->bpp);
1158  }
1159  }
1160  }
1161 
1162  // Copy blended buffer into the frame and free
1163  memcpy(p->data[0], buffer, s->image_linesize * s->height);
1164  av_free(buffer);
1165 
1166  return 0;
1167 }
1168 
1169 static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
1170  AVFrame *p, AVPacket *avpkt)
1171 {
1172  AVDictionary **metadatap = NULL;
1173  uint32_t tag, length;
1174  int decode_next_dat = 0;
1175  int i, ret;
1176 
1177  for (;;) {
1178  length = bytestream2_get_bytes_left(&s->gb);
1179  if (length <= 0) {
1180 
1181  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1182  avctx->skip_frame == AVDISCARD_ALL) {
1183  return 0;
1184  }
1185 
1186  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
1187  if (!(s->pic_state & PNG_IDAT))
1188  return 0;
1189  else
1190  goto exit_loop;
1191  }
1192  av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
1193  if ( s->pic_state & PNG_ALLIMAGE
1194  || (s->pic_state & (PNG_ALLIMAGE|PNG_IDAT)) == (PNG_ALLIMAGE|PNG_IDAT))
1195  goto exit_loop;
1196  ret = AVERROR_INVALIDDATA;
1197  goto fail;
1198  }
1199 
1200  length = bytestream2_get_be32(&s->gb);
1201  if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
1202  av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
1203  ret = AVERROR_INVALIDDATA;
1204  goto fail;
1205  }
1206  tag = bytestream2_get_le32(&s->gb);
1207  if (avctx->debug & FF_DEBUG_STARTCODE)
1208  av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
1209  av_fourcc2str(tag), length);
1210 
1211  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1212  avctx->skip_frame == AVDISCARD_ALL) {
1213  switch(tag) {
1214  case MKTAG('I', 'H', 'D', 'R'):
1215  case MKTAG('p', 'H', 'Y', 's'):
1216  case MKTAG('t', 'E', 'X', 't'):
1217  case MKTAG('I', 'D', 'A', 'T'):
1218  case MKTAG('t', 'R', 'N', 'S'):
1219  break;
1220  default:
1221  goto skip_tag;
1222  }
1223  }
1224 
1225  metadatap = &p->metadata;
1226  switch (tag) {
1227  case MKTAG('I', 'H', 'D', 'R'):
1228  if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
1229  goto fail;
1230  break;
1231  case MKTAG('p', 'H', 'Y', 's'):
1232  if ((ret = decode_phys_chunk(avctx, s)) < 0)
1233  goto fail;
1234  break;
1235  case MKTAG('f', 'c', 'T', 'L'):
1236  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1237  goto skip_tag;
1238  if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
1239  goto fail;
1240  decode_next_dat = 1;
1241  break;
1242  case MKTAG('f', 'd', 'A', 'T'):
1243  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1244  goto skip_tag;
1245  if (!decode_next_dat) {
1246  ret = AVERROR_INVALIDDATA;
1247  goto fail;
1248  }
1249  bytestream2_get_be32(&s->gb);
1250  length -= 4;
1251  /* fallthrough */
1252  case MKTAG('I', 'D', 'A', 'T'):
1253  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
1254  goto skip_tag;
1255  if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
1256  goto fail;
1257  break;
1258  case MKTAG('P', 'L', 'T', 'E'):
1259  if (decode_plte_chunk(avctx, s, length) < 0)
1260  goto skip_tag;
1261  break;
1262  case MKTAG('t', 'R', 'N', 'S'):
1263  if (decode_trns_chunk(avctx, s, length) < 0)
1264  goto skip_tag;
1265  break;
1266  case MKTAG('t', 'E', 'X', 't'):
1267  if (decode_text_chunk(s, length, 0, metadatap) < 0)
1268  av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
1269  bytestream2_skip(&s->gb, length + 4);
1270  break;
1271  case MKTAG('z', 'T', 'X', 't'):
1272  if (decode_text_chunk(s, length, 1, metadatap) < 0)
1273  av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
1274  bytestream2_skip(&s->gb, length + 4);
1275  break;
1276  case MKTAG('s', 'T', 'E', 'R'): {
1277  int mode = bytestream2_get_byte(&s->gb);
1278  AVStereo3D *stereo3d = av_stereo3d_create_side_data(p);
1279  if (!stereo3d)
1280  goto fail;
1281 
1282  if (mode == 0 || mode == 1) {
1283  stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1284  stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
1285  } else {
1286  av_log(avctx, AV_LOG_WARNING,
1287  "Unknown value in sTER chunk (%d)\n", mode);
1288  }
1289  bytestream2_skip(&s->gb, 4); /* crc */
1290  break;
1291  }
1292  case MKTAG('i', 'C', 'C', 'P'): {
1293  if (decode_iccp_chunk(s, length, p) < 0)
1294  goto fail;
1295  break;
1296  }
1297  case MKTAG('c', 'H', 'R', 'M'): {
1298  AVMasteringDisplayMetadata *mdm = av_mastering_display_metadata_create_side_data(p);
1299  if (!mdm) {
1300  ret = AVERROR(ENOMEM);
1301  goto fail;
1302  }
1303 
1304  mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1305  mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1306 
1307  /* RGB Primaries */
1308  for (i = 0; i < 3; i++) {
1309  mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1310  mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1311  }
1312 
1313  mdm->has_primaries = 1;
1314  bytestream2_skip(&s->gb, 4); /* crc */
1315  break;
1316  }
1317  case MKTAG('g', 'A', 'M', 'A'): {
1318  AVBPrint bp;
1319  char *gamma_str;
1320  int num = bytestream2_get_be32(&s->gb);
1321 
1321 
1322  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
1323  av_bprintf(&bp, "%i/%i", num, 100000);
1324  ret = av_bprint_finalize(&bp, &gamma_str);
1325  if (ret < 0)
1326  return ret;
1327 
1328  av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);
1329 
1330  bytestream2_skip(&s->gb, 4); /* crc */
1331  break;
1332  }
1333  case MKTAG('I', 'E', 'N', 'D'):
1334  if (!(s->pic_state & PNG_ALLIMAGE))
1335  av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
1336  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1337  ret = AVERROR_INVALIDDATA;
1338  goto fail;
1339  }
1340  bytestream2_skip(&s->gb, 4); /* crc */
1341  goto exit_loop;
1342  default:
1343  /* skip tag */
1344 skip_tag:
1345  bytestream2_skip(&s->gb, length + 4);
1346  break;
1347  }
1348  }
1349 exit_loop:
1350 
1351  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1352  avctx->skip_frame == AVDISCARD_ALL) {
1353  return 0;
1354  }
1355 
1356  if (s->bits_per_pixel <= 4)
1357  handle_small_bpp(s, p);
1358 
1359  /* apply transparency if needed */
1360  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
1361  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
1362  size_t raw_bpp = s->bpp - byte_depth;
1363  unsigned x, y;
1364 
1365  av_assert0(s->bit_depth > 1);
1366 
1367  for (y = 0; y < s->height; ++y) {
1368  uint8_t *row = &s->image_buf[s->image_linesize * y];
1369 
1370  /* since we're updating in-place, we have to go from right to left */
1371  for (x = s->width; x > 0; --x) {
1372  uint8_t *pixel = &row[s->bpp * (x - 1)];
1373  memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
1374 
1375  if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
1376  memset(&pixel[raw_bpp], 0, byte_depth);
1377  } else {
1378  memset(&pixel[raw_bpp], 0xff, byte_depth);
1379  }
1380  }
1381  }
1382  }
1383 
1384  /* handle P-frames only if a predecessor frame is available */
1385  if (s->last_picture.f->data[0]) {
1386  if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
1387  && s->last_picture.f->width == p->width
1388  && s->last_picture.f->height== p->height
1389  && s->last_picture.f->format== p->format
1390  ) {
1391  if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
1392  handle_p_frame_png(s, p);
1393  else if (CONFIG_APNG_DECODER &&
1394  avctx->codec_id == AV_CODEC_ID_APNG &&
1395  (ret = handle_p_frame_apng(avctx, s, p)) < 0)
1396  goto fail;
1397  }
1398  }
1399  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1400  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1401 
1402  return 0;
1403 
1404 fail:
1405  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1406  ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
1407  return ret;
1408 }
1409 
1410 #if CONFIG_PNG_DECODER
1411 static int decode_frame_png(AVCodecContext *avctx,
1412  void *data, int *got_frame,
1413  AVPacket *avpkt)
1414 {
1415  PNGDecContext *const s = avctx->priv_data;
1416  const uint8_t *buf = avpkt->data;
1417  int buf_size = avpkt->size;
1418  AVFrame *p;
1419  int64_t sig;
1420  int ret;
1421 
1422  ff_thread_release_buffer(avctx, &s->last_picture);
1423  FFSWAP(ThreadFrame, s->picture, s->last_picture);
1424  p = s->picture.f;
1425 
1426  bytestream2_init(&s->gb, buf, buf_size);
1427 
1428  /* check signature */
1429  sig = bytestream2_get_be64(&s->gb);
1430  if (sig != PNGSIG &&
1431  sig != MNGSIG) {
1432  av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
1433  return AVERROR_INVALIDDATA;
1434  }
1435 
1436  s->y = s->has_trns = 0;
1437  s->hdr_state = 0;
1438  s->pic_state = 0;
1439 
1440  /* init the zlib */
1441  s->zstream.zalloc = ff_png_zalloc;
1442  s->zstream.zfree = ff_png_zfree;
1443  s->zstream.opaque = NULL;
1444  ret = inflateInit(&s->zstream);
1445  if (ret != Z_OK) {
1446  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1447  return AVERROR_EXTERNAL;
1448  }
1449 
1450  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1451  goto the_end;
1452 
1453  if (avctx->skip_frame == AVDISCARD_ALL) {
1454  *got_frame = 0;
1455  ret = bytestream2_tell(&s->gb);
1456  goto the_end;
1457  }
1458 
1459  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1460  goto the_end;
1461 
1462  *got_frame = 1;
1463 
1464  ret = bytestream2_tell(&s->gb);
1465 the_end:
1466  inflateEnd(&s->zstream);
1467  s->crow_buf = NULL;
1468  return ret;
1469 }
1470 #endif
1471 
1472 #if CONFIG_APNG_DECODER
1473 static int decode_frame_apng(AVCodecContext *avctx,
1474  void *data, int *got_frame,
1475  AVPacket *avpkt)
1476 {
1477  PNGDecContext *const s = avctx->priv_data;
1478  int ret;
1479  AVFrame *p;
1480  ff_thread_release_buffer(avctx, &s->last_picture);
1481 
1482  FFSWAP(ThreadFrame, s->picture, s->last_picture);
1483  p = s->picture.f;
1484 
1485  if (!(s->hdr_state & PNG_IHDR)) {
1486  if (!avctx->extradata_size)
1487  return AVERROR_INVALIDDATA;
1488 
1489  /* only init fields, there is no zlib use in extradata */
1490  s->zstream.zalloc = ff_png_zalloc;
1491  s->zstream.zfree = ff_png_zfree;
1492 
1493  bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
1494  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1495  goto end;
1496  }
1497 
1498  /* reset state for a new frame */
1499  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1500  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1501  ret = AVERROR_EXTERNAL;
1502  goto end;
1503  }
1504  s->y = 0;
1505  s->pic_state = 0;
1506  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1507  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1508  goto end;
1509 
1510  if (!(s->pic_state & PNG_ALLIMAGE))
1511  av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
1512  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1513  ret = AVERROR_INVALIDDATA;
1514  goto end;
1515  }
1516  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1517  goto end;
1518 
1519  *got_frame = 1;
1520  ret = bytestream2_tell(&s->gb);
1521 
1522 end:
1523  inflateEnd(&s->zstream);
1524  return ret;
1525 }
1526 #endif
1527 
1528 #if CONFIG_LSCR_DECODER
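/* An LSCR packet starts with a little-endian count of updated rectangles,
 * followed by one 12-byte header per block (x, y, x2, y2 as le16 plus a le32
 * byte size); each block's pixels are stored as one or more PNG-style IDAT
 * chunks and inflated bottom-up (negative linesize) into the BGR24 frame. */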
1529 static int decode_frame_lscr(AVCodecContext *avctx,
1530  void *data, int *got_frame,
1531  AVPacket *avpkt)
1532 {
1533  PNGDecContext *const s = avctx->priv_data;
1534  GetByteContext *gb = &s->gb;
1535  AVFrame *frame = data;
1536  int ret, nb_blocks, offset = 0;
1537 
1538  if (avpkt->size < 2)
1539  return AVERROR_INVALIDDATA;
1540 
1541  bytestream2_init(gb, avpkt->data, avpkt->size);
1542 
1543  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
1544  return ret;
1545 
1546  nb_blocks = bytestream2_get_le16(gb);
1547  if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * 12)
1548  return AVERROR_INVALIDDATA;
1549 
1550  if (s->last_picture.f->data[0]) {
1551  ret = av_frame_copy(frame, s->last_picture.f);
1552  if (ret < 0)
1553  return ret;
1554  }
1555 
1556  for (int b = 0; b < nb_blocks; b++) {
1557  int x, y, x2, y2, w, h, left;
1558  uint32_t csize, size;
1559 
1560  s->zstream.zalloc = ff_png_zalloc;
1561  s->zstream.zfree = ff_png_zfree;
1562  s->zstream.opaque = NULL;
1563 
1564  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1565  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1566  ret = AVERROR_EXTERNAL;
1567  goto end;
1568  }
1569 
1570  bytestream2_seek(gb, 2 + b * 12, SEEK_SET);
1571 
1572  x = bytestream2_get_le16(gb);
1573  y = bytestream2_get_le16(gb);
1574  x2 = bytestream2_get_le16(gb);
1575  y2 = bytestream2_get_le16(gb);
1576  s->width = s->cur_w = w = x2-x;
1577  s->height = s->cur_h = h = y2-y;
1578 
1579  if (w <= 0 || x < 0 || x >= avctx->width || w + x > avctx->width ||
1580  h <= 0 || y < 0 || y >= avctx->height || h + y > avctx->height) {
1581  ret = AVERROR_INVALIDDATA;
1582  goto end;
1583  }
1584 
1585  size = bytestream2_get_le32(gb);
1586 
1587  frame->key_frame = (nb_blocks == 1) &&
1588  (w == avctx->width) &&
1589  (h == avctx->height) &&
1590  (x == 0) && (y == 0);
1591 
1592  bytestream2_seek(gb, 2 + nb_blocks * 12 + offset, SEEK_SET);
1593  csize = bytestream2_get_be32(gb);
1594  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1595  ret = AVERROR_INVALIDDATA;
1596  goto end;
1597  }
1598 
1599  offset += size;
1600  left = size;
1601 
1602  s->y = 0;
1603  s->row_size = w * 3;
1604 
1605  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
1606  if (!s->buffer) {
1607  ret = AVERROR(ENOMEM);
1608  goto end;
1609  }
1610 
1611  av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
1612  if (!s->last_row) {
1613  ret = AVERROR(ENOMEM);
1614  goto end;
1615  }
1616 
1617  s->crow_size = w * 3 + 1;
1618  s->crow_buf = s->buffer + 15;
1619  s->zstream.avail_out = s->crow_size;
1620  s->zstream.next_out = s->crow_buf;
1621  s->image_buf = frame->data[0] + (avctx->height - y - 1) * frame->linesize[0] + x * 3;
1622  s->image_linesize =-frame->linesize[0];
1623  s->bpp = 3;
1624  s->pic_state = 0;
1625 
1626  while (left > 16) {
1627  ret = png_decode_idat(s, csize);
1628  if (ret < 0)
1629  goto end;
1630  left -= csize + 16;
1631  if (left > 16) {
1632  bytestream2_skip(gb, 4);
1633  csize = bytestream2_get_be32(gb);
1634  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1635  ret = AVERROR_INVALIDDATA;
1636  goto end;
1637  }
1638  }
1639  }
1640 
1641  inflateEnd(&s->zstream);
1642  }
1643 
1644  frame->pict_type = frame->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1645 
1646  av_frame_unref(s->last_picture.f);
1647  if ((ret = av_frame_ref(s->last_picture.f, frame)) < 0)
1648  return ret;
1649 
1650  *got_frame = 1;
1651 end:
1652  inflateEnd(&s->zstream);
1653 
1654  if (ret < 0)
1655  return ret;
1656  return avpkt->size;
1657 }
1658 
1659 static void decode_flush(AVCodecContext *avctx)
1660 {
1661  PNGDecContext *s = avctx->priv_data;
1662 
1663  av_frame_unref(s->last_picture.f);
1664 }
1665 
1666 #endif
1667 
1668 #if HAVE_THREADS
1669 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1670 {
1671  PNGDecContext *psrc = src->priv_data;
1672  PNGDecContext *pdst = dst->priv_data;
1673  int ret;
1674 
1675  if (dst == src)
1676  return 0;
1677 
1678  ff_thread_release_buffer(dst, &pdst->picture);
1679  if (psrc->picture.f->data[0] &&
1680  (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
1681  return ret;
1682  if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
1683  pdst->width = psrc->width;
1684  pdst->height = psrc->height;
1685  pdst->bit_depth = psrc->bit_depth;
1686  pdst->color_type = psrc->color_type;
1687  pdst->compression_type = psrc->compression_type;
1688  pdst->interlace_type = psrc->interlace_type;
1689  pdst->filter_type = psrc->filter_type;
1690  pdst->cur_w = psrc->cur_w;
1691  pdst->cur_h = psrc->cur_h;
1692  pdst->x_offset = psrc->x_offset;
1693  pdst->y_offset = psrc->y_offset;
1694  pdst->has_trns = psrc->has_trns;
1695  memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
1696 
1697  pdst->dispose_op = psrc->dispose_op;
1698 
1699  memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
1700 
1701  pdst->hdr_state |= psrc->hdr_state;
1702 
1703  ff_thread_release_buffer(dst, &pdst->last_picture);
1704  if (psrc->last_picture.f->data[0] &&
1705  (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
1706  return ret;
1707 
1708  ff_thread_release_buffer(dst, &pdst->previous_picture);
1709  if (psrc->previous_picture.f->data[0] &&
1710  (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
1711  return ret;
1712  }
1713 
1714  return 0;
1715 }
1716 #endif
1717 
1718 static av_cold int png_dec_init(AVCodecContext *avctx)
1719 {
1720  PNGDecContext *s = avctx->priv_data;
1721 
1722  avctx->color_range = AVCOL_RANGE_JPEG;
1723 
1724  if (avctx->codec_id == AV_CODEC_ID_LSCR)
1725  avctx->pix_fmt = AV_PIX_FMT_BGR24;
1726 
1727  s->avctx = avctx;
1728  s->previous_picture.f = av_frame_alloc();
1729  s->last_picture.f = av_frame_alloc();
1730  s->picture.f = av_frame_alloc();
1731  if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
1732  av_frame_free(&s->previous_picture.f);
1733  av_frame_free(&s->last_picture.f);
1734  av_frame_free(&s->picture.f);
1735  return AVERROR(ENOMEM);
1736  }
1737 
1738  if (!avctx->internal->is_copy) {
1739  avctx->internal->allocate_progress = 1;
1740  ff_pngdsp_init(&s->dsp);
1741  }
1742 
1743  return 0;
1744 }
1745 
1746 static av_cold int png_dec_end(AVCodecContext *avctx)
1747 {
1748  PNGDecContext *s = avctx->priv_data;
1749 
1750  ff_thread_release_buffer(avctx, &s->previous_picture);
1751  av_frame_free(&s->previous_picture.f);
1752  ff_thread_release_buffer(avctx, &s->last_picture);
1753  av_frame_free(&s->last_picture.f);
1754  ff_thread_release_buffer(avctx, &s->picture);
1755  av_frame_free(&s->picture.f);
1756  av_freep(&s->buffer);
1757  s->buffer_size = 0;
1758  av_freep(&s->last_row);
1759  s->last_row_size = 0;
1760  av_freep(&s->tmp_row);
1761  s->tmp_row_size = 0;
1762 
1763  return 0;
1764 }
1765 
1766 #if CONFIG_APNG_DECODER
1767 AVCodec ff_apng_decoder = {
1768  .name = "apng",
1769  .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
1770  .type = AVMEDIA_TYPE_VIDEO,
1771  .id = AV_CODEC_ID_APNG,
1772  .priv_data_size = sizeof(PNGDecContext),
1773  .init = png_dec_init,
1774  .close = png_dec_end,
1775  .decode = decode_frame_apng,
1776  .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
1777  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1778  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1779  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1780 };
1781 #endif
1782 
1783 #if CONFIG_PNG_DECODER
1784 AVCodec ff_png_decoder = {
1785  .name = "png",
1786  .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
1787  .type = AVMEDIA_TYPE_VIDEO,
1788  .id = AV_CODEC_ID_PNG,
1789  .priv_data_size = sizeof(PNGDecContext),
1790  .init = png_dec_init,
1791  .close = png_dec_end,
1792  .decode = decode_frame_png,
1793  .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
1794  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1795  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1797 };
1798 #endif
1799 
1800 #if CONFIG_LSCR_DECODER
1801 AVCodec ff_lscr_decoder = {
1802  .name = "lscr",
1803  .long_name = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
1804  .type = AVMEDIA_TYPE_VIDEO,
1805  .id = AV_CODEC_ID_LSCR,
1806  .priv_data_size = sizeof(PNGDecContext),
1807  .init = png_dec_init,
1808  .close = png_dec_end,
1809  .decode = decode_frame_lscr,
1810  .flush = decode_flush,
1811  .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1813 };
1814 #endif
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length, AVFrame *p)
Definition: pngdec.c:618
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:962
#define PNG_FILTER_VALUE_AVG
Definition: png.h:41
static void png_handle_row(PNGDecContext *s)
Definition: pngdec.c:323
ThreadFrame previous_picture
Definition: pngdec.c:55
#define NULL
Definition: coverity.c:32
int last_y_offset
Definition: pngdec.c:65
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane...
Definition: imgutils.c:76
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
int width
Definition: pngdec.c:61
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
unsigned int tmp_row_size
Definition: pngdec.c:86
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVFrame * f
Definition: thread.h:35
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
const char * g
Definition: vf_curves.c:115
int pass_row_size
Definition: pngdec.c:92
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
AVDictionary * metadata
Definition: frame.h:205
uint8_t * tmp_row
Definition: pngdec.c:85
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
#define avpriv_request_sample(...)
PNGHeaderState
Definition: pngdec.c:40
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2196
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
int num
Numerator.
Definition: rational.h:59
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed, AVDictionary **dict)
Definition: pngdec.c:502
int size
Definition: avcodec.h:1478
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1944
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
enum PNGImageState pic_state
Definition: pngdec.c:60
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
discard all
Definition: avcodec.h:811
Views are next to each other.
Definition: stereo3d.h:67
#define PNG_COLOR_TYPE_RGB
Definition: png.h:33
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
Definition: pngdsp.h:28
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
#define PNG_COLOR_TYPE_GRAY_ALPHA
Definition: png.h:35
#define src
Definition: vp8dsp.c:254
AVCodec.
Definition: avcodec.h:3477
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
#define PNG_COLOR_TYPE_PALETTE
Definition: png.h:32
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int filter_type
Definition: pngdec.c:72
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdec.c:185
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that&#39;s been allocated with av_malloc() or another memory allocation function...
Definition: dict.h:73
#define PNG_FILTER_VALUE_PAETH
Definition: png.h:42
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3036
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
int y_offset
Definition: pngdec.c:64
uint8_t
#define av_cold
Definition: attributes.h:82
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.If the codec allocates writable tables in its init()
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define PNG_COLOR_TYPE_RGB_ALPHA
Definition: png.h:34
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2647
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Undefined Behavior: in the C language some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c...
Definition: undefined.txt:32
Multithreading support functions.
AVCodec ff_apng_decoder
filter_frame: for filters that do not use the …, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
Definition: pngdec.c:602
Structure to hold side data for an AVFrame.
Definition: frame.h:201
uint8_t * data
Definition: avcodec.h:1477
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1496
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1794
#define ff_dlog(a,...)
AVDictionary * metadata
metadata.
Definition: frame.h:554
static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
Definition: pngdec.c:844
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:415
AVCodec ff_lscr_decoder
ptrdiff_t size
Definition: opengl_enc.c:100
unsigned int last_row_size
Definition: pngdec.c:84
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
int cur_h
Definition: pngdec.c:62
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1509
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:773
#define U(x)
Definition: vp56_arith.h:37
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int width
Definition: frame.h:326
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const uint8_t png_pass_dsp_mask[NB_PASSES]
Definition: pngdec.c:108
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:212
#define AV_BPRINT_SIZE_UNLIMITED
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt)
Definition: pngdec.c:1169
static const uint16_t mask[17]
Definition: lzw.c:38
#define OP_SUB(x, s, l)
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:136
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1036
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
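The AVBPrint entries in this list follow an init / append / finalize pattern; a minimal hedged sketch (the message content and the length variable are made up):
AVBPrint bp;
char *text;
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
av_bprintf(&bp, "chunk of %u bytes", (unsigned)length);
if (av_bprint_finalize(&bp, &text) < 0)   /* hands the accumulated string over to text */
    return AVERROR(ENOMEM);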
uint8_t * crow_buf
Definition: pngdec.c:82
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int pass
Definition: pngdec.c:89
int ff_png_get_nb_channels(int color_type)
Definition: png.c:49
ThreadFrame picture
Definition: pngdec.c:57
int height
Definition: pngdec.c:61
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
#define PNGSIG
Definition: png.h:47
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3484
int bits_per_pixel
Definition: pngdec.c:74
GetByteContext gb
Definition: pngdec.c:54
#define FFMAX(a, b)
Definition: common.h:94
#define NB_PASSES
Definition: png.h:45
#define fail()
Definition: checkasm.h:120
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
uint8_t blend_op
Definition: pngdec.c:66
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1483
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
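A hedged sketch of attaching stereo 3D side data to a decoded frame, roughly what a handler for a side-by-side stereo indicator would do (the frame pointer p is assumed):
AVStereo3D *stereo = av_stereo3d_create_side_data(p);
if (!stereo)
    return AVERROR(ENOMEM);
stereo->type = AV_STEREO3D_SIDEBYSIDE;   /* views are next to each other */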
z_stream zstream
Definition: pngdec.c:94
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
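Typical hedged use when validating header dimensions before allocating anything (width, height and avctx stand for the decoder's values):
int ret = av_image_check_size(width, height, 0, avctx);
if (ret < 0)
    return ret;   /* image of this size could not be addressed safely */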
#define b
Definition: input.c:41
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:351
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:146
#define FFMIN(a, b)
Definition: common.h:96
#define PNG_FILTER_VALUE_SUB
Definition: png.h:39
uint32_t palette[256]
Definition: pngdec.c:81
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define PNG_COLOR_TYPE_GRAY
Definition: png.h:31
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
Definition: pngdec.c:251
int width
picture width / height.
Definition: avcodec.h:1738
Frame threading porting note: move frame-setup code, as well as code calling up to it, to before the decode process starts, and call ff_thread_finish_setup() afterwards. If some code can't be moved
uint8_t w
Definition: llviddspenc.c:38
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
These buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the …, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
#define s(width, name)
Definition: cbs_vp9.c:257
uint8_t * last_row
Definition: pngdec.c:83
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int n
Definition: avisynth_c.h:760
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AVCodecContext * avctx
Definition: pngdec.c:52
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
Definition: bprint.c:218
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
Definition: pngdsp.c:43
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end)
Definition: pngdec.c:434
static void error(const char *err)
int channels
Definition: pngdec.c:73
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
if(ret)
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:550
static uint8_t * iso88591_to_utf8(const uint8_t *in, size_t size_in)
Definition: pngdec.c:478
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:341
static av_cold int png_dec_init(AVCodecContext *avctx)
Definition: pngdec.c:1718
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
Libavcodec external API header.
enum PNGHeaderState hdr_state
Definition: pngdec.c:59
int buffer_size
Definition: pngdec.c:88
static int skip_tag(AVIOContext *in, int32_t tag_name)
Definition: ismindex.c:132
Frame threading porting note: if the codec calls ff_thread_report/await_progress(), set AVCodecInternal allocate_progress; the frames must then be freed with ff_thread_release_buffer(). Otherwise leave it at zero and decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded; a good place to put this is where draw_horiz_band() is called (add this if it isn't called anywhere). The sketch below illustrates the report/await pairing.
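A hedged sketch of that progress protocol (the row counter y and the per-row granularity are illustrative; s->picture and s->last_picture are the ThreadFrame fields listed elsewhere on this page):
/* producer side: announce that rows up to y of the current frame are decoded */
ff_thread_report_progress(&s->picture, y, 0);
/* consumer side: block until those rows of the reference frame are available */
ff_thread_await_progress(&s->last_picture, y, 0);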
enum AVCodecID codec_id
Definition: avcodec.h:1575
#define PNG_FILTER_VALUE_UP
Definition: png.h:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
#define PNG_FILTER_TYPE_LOCO
Definition: png.h:37
uint8_t last_dispose_op
Definition: pngdec.c:67
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
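For example (a hedged sketch; x_density and y_density are illustrative pHYs-style values), a pixel density pair can be turned into an aspect ratio:
AVRational sar = av_make_q(x_density, y_density);   /* simple num/den pair, not reduced */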
int debug
debug
Definition: avcodec.h:2646
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1565
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1590
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1964
uint8_t * data
Definition: frame.h:203
Snow bitstream and range coder documentation (garbled cross-reference; the full description lives in doc/snow.txt).
Definition: snow.txt:206
int interlace_type
Definition: pngdec.c:71
PNGImageState
Definition: pngdec.c:45
void * buf
Definition: avisynth_c.h:766
const uint8_t ff_png_pass_ymask[NB_PASSES]
Definition: png.c:25
int image_linesize
Definition: pngdec.c:80
int extradata_size
Definition: avcodec.h:1667
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
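A hedged sketch of combining av_dict_set() with the DONT_STRDUP flags listed above, so the dictionary takes ownership of already-allocated strings (the key/value content is made up; frame stands for the AVFrame being decorated):
char *key = av_strdup("Title");
char *val = av_strdup("example text");
if (!key || !val) {
    av_free(key);
    av_free(val);
    return AVERROR(ENOMEM);
}
av_dict_set(&frame->metadata, key, val,
            AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);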
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2627
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:197
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Mastering display metadata capable of representing the color volume of the display used to master the...
int cur_w
Definition: pngdec.c:62
uint8_t transparent_color_be[6]
Definition: pngdec.c:77
#define OP_AVG(x, s, l)
uint8_t * image_buf
Definition: pngdec.c:79
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
uint8_t dispose_op
Definition: pngdec.c:66
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
uint8_t pixel
Definition: tiny_ssim.c:42
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int last_x_offset
Definition: pngdec.c:65
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
#define FAST_DIV255(x)
Definition: pngdec.c:1054
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1056
#define YUV2RGB(NAME, TYPE)
Definition: pngdec.c:308
static const uint8_t png_pass_mask[NB_PASSES]
Definition: pngdec.c:98
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:76
Y , 8bpp.
Definition: pixfmt.h:74
static av_cold int png_dec_end(AVCodecContext *avctx)
Definition: pngdec.c:1746
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdsp.h:33
common internal api header.
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:889
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
#define PNG_FILTER_VALUE_NONE
Definition: png.h:38
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:796
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
int last_w
Definition: pngdec.c:63
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_padded_malloc(), except that the buffer will always be 0-initialized after the call...
Definition: utils.c:82
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
Definition: pngdec.c:103
Stereoscopic video.
int den
Denominator.
Definition: rational.h:60
void ff_png_zfree(void *opaque, void *ptr)
Definition: png.c:44
void * priv_data
Definition: avcodec.h:1592
static int png_decode_idat(PNGDecContext *s, int length)
Definition: pngdec.c:404
uint8_t * buffer
Definition: pngdec.c:87
#define av_free(p)
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2660
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:346
int row_size
Definition: pngdec.c:91
APNG common header.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
PNGDSPContext dsp
Definition: pngdec.c:51
int compression_type
Definition: pngdec.c:70
int last_h
Definition: pngdec.c:63
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
Definition: png.c:62
int height
Definition: frame.h:326
FILE * out
Definition: movenc.c:54
int bit_depth
Definition: pngdec.c:68
#define av_freep(p)
int color_type
Definition: pngdec.c:69
ThreadFrame last_picture
Definition: pngdec.c:56
#define av_malloc_array(a, b)
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
Definition: pngdec.c:115
#define FFSWAP(type, a, b)
Definition: common.h:99
int crow_size
Definition: pngdec.c:90
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
static void decode_flush(AVCodecContext *avctx)
Definition: agm.c:1250
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout and sample format. They are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
int x_offset
Definition: pngdec.c:64
#define MKTAG(a, b, c, d)
Definition: common.h:366
void * ff_png_zalloc(void *opaque, unsigned int items, unsigned int size)
Definition: png.c:39
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1454
int has_trns
Definition: pngdec.c:76
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
mode
Use these values in ebur128_init (or'ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2624
AVCodec ff_png_decoder
Predicted.
Definition: avutil.h:275
#define UNROLL_FILTER(op)
Definition: pngdec.c:236
#define MNGSIG
Definition: png.h:48