FFmpeg
pngdec.c
Go to the documentation of this file.
1 /*
2  * PNG image format
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 //#define DEBUG
23 
24 #include "libavutil/avassert.h"
25 #include "libavutil/bprint.h"
26 #include "libavutil/imgutils.h"
27 #include "libavutil/intreadwrite.h"
28 #include "libavutil/stereo3d.h"
30 
31 #include "avcodec.h"
32 #include "bytestream.h"
33 #include "internal.h"
34 #include "apng.h"
35 #include "png.h"
36 #include "pngdsp.h"
37 #include "thread.h"
38 
39 #include <zlib.h>
40 
42  PNG_IHDR = 1 << 0,
43  PNG_PLTE = 1 << 1,
44 };
45 
47  PNG_IDAT = 1 << 0,
48  PNG_ALLIMAGE = 1 << 1,
49 };
50 
51 typedef struct PNGDecContext {
54 
59 
62  int width, height;
63  int cur_w, cur_h;
64  int last_w, last_h;
69  int bit_depth;
74  int channels;
76  int bpp;
77  int has_trns;
79 
82  uint32_t palette[256];
85  unsigned int last_row_size;
87  unsigned int tmp_row_size;
90  int pass;
91  int crow_size; /* compressed row size (include filter type) */
92  int row_size; /* decompressed row size */
93  int pass_row_size; /* decompress row size of the current pass */
94  int y;
95  z_stream zstream;
97 
/* Mask to determine which pixels are valid in a pass */
/* One entry per interlace pass (presumably Adam7 — 7 passes; confirm
 * against ff_png_pass_* in png.h): bit (7 - (x & 7)) is set when column
 * x carries a new sample in that pass, i.e. the packed source row
 * advances (see the "(mask << j) & 0x80" tests in
 * png_put_interlaced_row() below). */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};
102 
103 /* Mask to determine which y pixels can be written in a pass */
105  0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
106 };
107 
108 /* Mask to determine which pixels to overwrite while displaying */
110  0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
111 };
112 
/* NOTE: we try to construct a good looking image at each pass. width
 * is the original image width. We also do pixel format conversion at
 * this stage */
/* Expand one filtered row of an interlaced pass into the full-width
 * destination row 'dst'.  'src' holds only the pixels belonging to the
 * current pass, packed contiguously; they are scattered to their final
 * x positions.  'color_type' is part of the signature but not consulted
 * in this function. */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;
    uint8_t *d;
    const uint8_t *s;

    /* mask: which columns carry new data in this pass (source advances);
     * dsp_mask: which columns are (over)written for display, a superset
     * so intermediate passes still look reasonable. */
    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        /* sub-byte pixels: extract sample src_x from the packed source
         * and splice it into the destination byte at bit (7 - j) */
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                /* 0xFF7F >> j, truncated to 8 bits, clears bit (7 - j) */
                dst[x >> 3] &= 0xFF7F>>j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);     /* bit offset of pixel x inside its byte */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F>>j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4*(x&1);         /* 0 for the high nibble, 4 for the low */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F>>j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        /* whole-byte pixels: copy bpp bytes per displayed column; the
         * source pointer only advances on columns owned by this pass */
        bpp = bits_per_pixel >> 3;
        d = dst;
        s = src;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                memcpy(d, s, bpp);
            }
            d += bpp;
            if ((mask << j) & 0x80)
                s += bpp;
        }
        break;
    }
}
185 
187  int w, int bpp)
188 {
189  int i;
190  for (i = 0; i < w; i++) {
191  int a, b, c, p, pa, pb, pc;
192 
193  a = dst[i - bpp];
194  b = top[i];
195  c = top[i - bpp];
196 
197  p = b - c;
198  pc = a - c;
199 
200  pa = abs(p);
201  pb = abs(pc);
202  pc = abs(p + pc);
203 
204  if (pa <= pb && pa <= pc)
205  p = a;
206  else if (pb <= pc)
207  p = b;
208  else
209  p = c;
210  dst[i] = p + src[i];
211  }
212 }
213 
/* Apply the byte-wise filter 'op' across a row for a compile-time
 * constant bpp of 1..4.  The previous pixel's components are carried
 * in the locals r, g, b, a so the inner loop never re-reads
 * dst[i - bpp].  Relies on i, size, dst, src and last being declared
 * in the enclosing scope; on exit i points at the first unprocessed
 * byte (the tail, if size is not a multiple of bpp, is left to the
 * caller — see UNROLL_FILTER). */
#define UNROLL1(bpp, op) \
    { \
        r = dst[0]; \
        if (bpp >= 2) \
            g = dst[1]; \
        if (bpp >= 3) \
            b = dst[2]; \
        if (bpp >= 4) \
            a = dst[3]; \
        for (; i <= size - bpp; i += bpp) { \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
            if (bpp == 1) \
                continue; \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
            if (bpp == 2) \
                continue; \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
            if (bpp == 3) \
                continue; \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
        } \
    }
236 
/* Dispatch on the runtime bpp to a specialised UNROLL1 expansion
 * (bpp 1..4), then finish any leftover bytes — or handle bpp > 4
 * entirely — with the generic per-byte loop.  Same scope
 * requirements as UNROLL1. */
#define UNROLL_FILTER(op) \
    if (bpp == 1) { \
        UNROLL1(1, op) \
    } else if (bpp == 2) { \
        UNROLL1(2, op) \
    } else if (bpp == 3) { \
        UNROLL1(3, op) \
    } else if (bpp == 4) { \
        UNROLL1(4, op) \
    } \
    for (; i < size; i++) { \
        dst[i] = op(dst[i - bpp], src[i], last[i]); \
    }
250 
251 /* NOTE: 'dst' can be equal to 'last' */
253  uint8_t *src, uint8_t *last, int size, int bpp)
254 {
255  int i, p, r, g, b, a;
256 
257  switch (filter_type) {
259  memcpy(dst, src, size);
260  break;
262  for (i = 0; i < bpp; i++)
263  dst[i] = src[i];
264  if (bpp == 4) {
265  p = *(int *)dst;
266  for (; i < size; i += bpp) {
267  unsigned s = *(int *)(src + i);
268  p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
269  *(int *)(dst + i) = p;
270  }
271  } else {
272 #define OP_SUB(x, s, l) ((x) + (s))
274  }
275  break;
276  case PNG_FILTER_VALUE_UP:
277  dsp->add_bytes_l2(dst, src, last, size);
278  break;
280  for (i = 0; i < bpp; i++) {
281  p = (last[i] >> 1);
282  dst[i] = p + src[i];
283  }
284 #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
286  break;
288  for (i = 0; i < bpp; i++) {
289  p = last[i];
290  dst[i] = p + src[i];
291  }
292  if (bpp > 2 && size > 4) {
293  /* would write off the end of the array if we let it process
294  * the last pixel with bpp=3 */
295  int w = (bpp & 3) ? size - 3 : size;
296 
297  if (w > i) {
298  dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
299  i = w;
300  }
301  }
302  ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
303  break;
304  }
305 }
306 
/* This used to be called "deloco" in FFmpeg
 * and is actually an inverse reversible colorspace transformation */
/* Generates deloco_<NAME>(): adds the green sample back onto the red
 * and blue samples of every pixel, in place.  'alpha' selects the
 * pixel stride — 3 components, or 4 when an alpha channel follows.
 * Additions wrap modulo the sample TYPE (uint8_t/uint16_t), which is
 * what makes the forward transform reversible. */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size; i += 3 + alpha) { \
        int g = dst [i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8, uint8_t)
YUV2RGB(rgb16, uint16_t)
322 
323 /* process exactly one decompressed row */
325 {
326  uint8_t *ptr, *last_row;
327  int got_line;
328 
329  if (!s->interlace_type) {
330  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
331  if (s->y == 0)
332  last_row = s->last_row;
333  else
334  last_row = ptr - s->image_linesize;
335 
336  png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
337  last_row, s->row_size, s->bpp);
338  /* loco lags by 1 row so that it doesn't interfere with top prediction */
339  if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
340  if (s->bit_depth == 16) {
341  deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
342  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
343  } else {
344  deloco_rgb8(ptr - s->image_linesize, s->row_size,
345  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
346  }
347  }
348  s->y++;
349  if (s->y == s->cur_h) {
350  s->pic_state |= PNG_ALLIMAGE;
351  if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
352  if (s->bit_depth == 16) {
353  deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
354  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
355  } else {
356  deloco_rgb8(ptr, s->row_size,
357  s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
358  }
359  }
360  }
361  } else {
362  got_line = 0;
363  for (;;) {
364  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
365  if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
366  /* if we already read one row, it is time to stop to
367  * wait for the next one */
368  if (got_line)
369  break;
370  png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
371  s->last_row, s->pass_row_size, s->bpp);
372  FFSWAP(uint8_t *, s->last_row, s->tmp_row);
373  FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
374  got_line = 1;
375  }
376  if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
377  png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
378  s->color_type, s->last_row);
379  }
380  s->y++;
381  if (s->y == s->cur_h) {
382  memset(s->last_row, 0, s->row_size);
383  for (;;) {
384  if (s->pass == NB_PASSES - 1) {
385  s->pic_state |= PNG_ALLIMAGE;
386  goto the_end;
387  } else {
388  s->pass++;
389  s->y = 0;
390  s->pass_row_size = ff_png_pass_row_size(s->pass,
391  s->bits_per_pixel,
392  s->cur_w);
393  s->crow_size = s->pass_row_size + 1;
394  if (s->pass_row_size != 0)
395  break;
396  /* skip pass if empty row */
397  }
398  }
399  }
400  }
401 the_end:;
402  }
403 }
404 
406 {
407  int ret;
408  s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
409  s->zstream.next_in = (unsigned char *)s->gb.buffer;
410  bytestream2_skip(&s->gb, length);
411 
412  /* decode one line if possible */
413  while (s->zstream.avail_in > 0) {
414  ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
415  if (ret != Z_OK && ret != Z_STREAM_END) {
416  av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
417  return AVERROR_EXTERNAL;
418  }
419  if (s->zstream.avail_out == 0) {
420  if (!(s->pic_state & PNG_ALLIMAGE)) {
421  png_handle_row(s);
422  }
423  s->zstream.avail_out = s->crow_size;
424  s->zstream.next_out = s->crow_buf;
425  }
426  if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
428  "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
429  return 0;
430  }
431  }
432  return 0;
433 }
434 
435 static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
436  const uint8_t *data_end)
437 {
438  z_stream zstream;
439  unsigned char *buf;
440  unsigned buf_size;
441  int ret;
442 
443  zstream.zalloc = ff_png_zalloc;
444  zstream.zfree = ff_png_zfree;
445  zstream.opaque = NULL;
446  if (inflateInit(&zstream) != Z_OK)
447  return AVERROR_EXTERNAL;
448  zstream.next_in = (unsigned char *)data;
449  zstream.avail_in = data_end - data;
451 
452  while (zstream.avail_in > 0) {
453  av_bprint_get_buffer(bp, 2, &buf, &buf_size);
454  if (buf_size < 2) {
455  ret = AVERROR(ENOMEM);
456  goto fail;
457  }
458  zstream.next_out = buf;
459  zstream.avail_out = buf_size - 1;
460  ret = inflate(&zstream, Z_PARTIAL_FLUSH);
461  if (ret != Z_OK && ret != Z_STREAM_END) {
462  ret = AVERROR_EXTERNAL;
463  goto fail;
464  }
465  bp->len += zstream.next_out - buf;
466  if (ret == Z_STREAM_END)
467  break;
468  }
469  inflateEnd(&zstream);
470  bp->str[bp->len] = 0;
471  return 0;
472 
473 fail:
474  inflateEnd(&zstream);
476  return ret;
477 }
478 
/* Convert a Latin-1 (ISO 8859-1) buffer into a freshly allocated,
 * NUL-terminated UTF-8 string.  Returns NULL when the output size
 * would overflow size_t or the allocation fails; the caller owns the
 * returned buffer and must av_free() it. */
static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
{
    size_t n_twobyte = 0, i;
    uint8_t *dst, *p;

    /* every byte >= 0x80 expands to a two-byte UTF-8 sequence */
    for (i = 0; i < size_in; i++)
        if (in[i] >= 0x80)
            n_twobyte++;
    /* guard size_in + n_twobyte + 1 against size_t overflow */
    if (size_in == SIZE_MAX || n_twobyte > SIZE_MAX - size_in - 1)
        return NULL;
    dst = av_malloc(size_in + n_twobyte + 1);
    if (!dst)
        return NULL;
    p = dst;
    for (i = 0; i < size_in; i++) {
        uint8_t c = in[i];
        if (c < 0x80) {
            *p++ = c;
        } else {
            /* 110xxxxx 10xxxxxx encoding of U+0080..U+00FF */
            *p++ = 0xC0 | (c >> 6);
            *p++ = 0x80 | (c & 0x3F);
        }
    }
    *p++ = 0;
    return dst;
}
502 
503 static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
504  AVDictionary **dict)
505 {
506  int ret, method;
507  const uint8_t *data = s->gb.buffer;
508  const uint8_t *data_end = data + length;
509  const uint8_t *keyword = data;
510  const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
511  uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
512  unsigned text_len;
513  AVBPrint bp;
514 
515  if (!keyword_end)
516  return AVERROR_INVALIDDATA;
517  data = keyword_end + 1;
518 
519  if (compressed) {
520  if (data == data_end)
521  return AVERROR_INVALIDDATA;
522  method = *(data++);
523  if (method)
524  return AVERROR_INVALIDDATA;
525  if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
526  return ret;
527  text_len = bp.len;
528  ret = av_bprint_finalize(&bp, (char **)&text);
529  if (ret < 0)
530  return ret;
531  } else {
532  text = (uint8_t *)data;
533  text_len = data_end - text;
534  }
535 
536  kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
537  txt_utf8 = iso88591_to_utf8(text, text_len);
538  if (text != data)
539  av_free(text);
540  if (!(kw_utf8 && txt_utf8)) {
541  av_free(kw_utf8);
542  av_free(txt_utf8);
543  return AVERROR(ENOMEM);
544  }
545 
546  av_dict_set(dict, kw_utf8, txt_utf8,
548  return 0;
549 }
550 
552  uint32_t length)
553 {
554  if (length != 13)
555  return AVERROR_INVALIDDATA;
556 
557  if (s->pic_state & PNG_IDAT) {
558  av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
559  return AVERROR_INVALIDDATA;
560  }
561 
562  if (s->hdr_state & PNG_IHDR) {
563  av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
564  return AVERROR_INVALIDDATA;
565  }
566 
567  s->width = s->cur_w = bytestream2_get_be32(&s->gb);
568  s->height = s->cur_h = bytestream2_get_be32(&s->gb);
569  if (av_image_check_size(s->width, s->height, 0, avctx)) {
570  s->cur_w = s->cur_h = s->width = s->height = 0;
571  av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
572  return AVERROR_INVALIDDATA;
573  }
574  s->bit_depth = bytestream2_get_byte(&s->gb);
575  if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
576  s->bit_depth != 8 && s->bit_depth != 16) {
577  av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
578  goto error;
579  }
580  s->color_type = bytestream2_get_byte(&s->gb);
581  s->compression_type = bytestream2_get_byte(&s->gb);
582  if (s->compression_type) {
583  av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n", s->compression_type);
584  goto error;
585  }
586  s->filter_type = bytestream2_get_byte(&s->gb);
587  s->interlace_type = bytestream2_get_byte(&s->gb);
588  bytestream2_skip(&s->gb, 4); /* crc */
589  s->hdr_state |= PNG_IHDR;
590  if (avctx->debug & FF_DEBUG_PICT_INFO)
591  av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
592  "compression_type=%d filter_type=%d interlace_type=%d\n",
593  s->width, s->height, s->bit_depth, s->color_type,
595 
596  return 0;
597 error:
598  s->cur_w = s->cur_h = s->width = s->height = 0;
599  s->bit_depth = 8;
600  return AVERROR_INVALIDDATA;
601 }
602 
604 {
605  if (s->pic_state & PNG_IDAT) {
606  av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
607  return AVERROR_INVALIDDATA;
608  }
609  avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
610  avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
611  if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
612  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
613  bytestream2_skip(&s->gb, 1); /* unit specifier */
614  bytestream2_skip(&s->gb, 4); /* crc */
615 
616  return 0;
617 }
618 
620  uint32_t length, AVFrame *p)
621 {
622  int ret;
623  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
624 
625  if (!(s->hdr_state & PNG_IHDR)) {
626  av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
627  return AVERROR_INVALIDDATA;
628  }
629  if (!(s->pic_state & PNG_IDAT)) {
630  /* init image info */
631  ret = ff_set_dimensions(avctx, s->width, s->height);
632  if (ret < 0)
633  return ret;
634 
636  s->bits_per_pixel = s->bit_depth * s->channels;
637  s->bpp = (s->bits_per_pixel + 7) >> 3;
638  s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;
639 
640  if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
642  avctx->pix_fmt = AV_PIX_FMT_RGB24;
643  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
645  avctx->pix_fmt = AV_PIX_FMT_RGBA;
646  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
648  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
649  } else if (s->bit_depth == 16 &&
651  avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
652  } else if (s->bit_depth == 16 &&
654  avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
655  } else if (s->bit_depth == 16 &&
657  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
658  } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
660  avctx->pix_fmt = AV_PIX_FMT_PAL8;
661  } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
662  avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
663  } else if (s->bit_depth == 8 &&
665  avctx->pix_fmt = AV_PIX_FMT_YA8;
666  } else if (s->bit_depth == 16 &&
668  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
669  } else {
671  "Bit depth %d color type %d",
672  s->bit_depth, s->color_type);
673  return AVERROR_PATCHWELCOME;
674  }
675 
676  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
677  switch (avctx->pix_fmt) {
678  case AV_PIX_FMT_RGB24:
679  avctx->pix_fmt = AV_PIX_FMT_RGBA;
680  break;
681 
682  case AV_PIX_FMT_RGB48BE:
683  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
684  break;
685 
686  case AV_PIX_FMT_GRAY8:
687  avctx->pix_fmt = AV_PIX_FMT_YA8;
688  break;
689 
690  case AV_PIX_FMT_GRAY16BE:
691  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
692  break;
693 
694  default:
695  avpriv_request_sample(avctx, "bit depth %d "
696  "and color type %d with TRNS",
697  s->bit_depth, s->color_type);
698  return AVERROR_INVALIDDATA;
699  }
700 
701  s->bpp += byte_depth;
702  }
703 
704  if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
705  return ret;
708  if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
709  return ret;
710  }
712  p->key_frame = 1;
714 
715  ff_thread_finish_setup(avctx);
716 
717  /* compute the compressed row size */
718  if (!s->interlace_type) {
719  s->crow_size = s->row_size + 1;
720  } else {
721  s->pass = 0;
723  s->bits_per_pixel,
724  s->cur_w);
725  s->crow_size = s->pass_row_size + 1;
726  }
727  ff_dlog(avctx, "row_size=%d crow_size =%d\n",
728  s->row_size, s->crow_size);
729  s->image_buf = p->data[0];
730  s->image_linesize = p->linesize[0];
731  /* copy the palette if needed */
732  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
733  memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
734  /* empty row is used if differencing to the first row */
736  if (!s->last_row)
737  return AVERROR_INVALIDDATA;
738  if (s->interlace_type ||
741  if (!s->tmp_row)
742  return AVERROR_INVALIDDATA;
743  }
744  /* compressed row */
746  if (!s->buffer)
747  return AVERROR(ENOMEM);
748 
749  /* we want crow_buf+1 to be 16-byte aligned */
750  s->crow_buf = s->buffer + 15;
751  s->zstream.avail_out = s->crow_size;
752  s->zstream.next_out = s->crow_buf;
753  }
754 
755  s->pic_state |= PNG_IDAT;
756 
757  /* set image to non-transparent bpp while decompressing */
759  s->bpp -= byte_depth;
760 
761  ret = png_decode_idat(s, length);
762 
764  s->bpp += byte_depth;
765 
766  if (ret < 0)
767  return ret;
768 
769  bytestream2_skip(&s->gb, 4); /* crc */
770 
771  return 0;
772 }
773 
775  uint32_t length)
776 {
777  int n, i, r, g, b;
778 
779  if ((length % 3) != 0 || length > 256 * 3)
780  return AVERROR_INVALIDDATA;
781  /* read the palette */
782  n = length / 3;
783  for (i = 0; i < n; i++) {
784  r = bytestream2_get_byte(&s->gb);
785  g = bytestream2_get_byte(&s->gb);
786  b = bytestream2_get_byte(&s->gb);
787  s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
788  }
789  for (; i < 256; i++)
790  s->palette[i] = (0xFFU << 24);
791  s->hdr_state |= PNG_PLTE;
792  bytestream2_skip(&s->gb, 4); /* crc */
793 
794  return 0;
795 }
796 
798  uint32_t length)
799 {
800  int v, i;
801 
802  if (!(s->hdr_state & PNG_IHDR)) {
803  av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
804  return AVERROR_INVALIDDATA;
805  }
806 
807  if (s->pic_state & PNG_IDAT) {
808  av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
809  return AVERROR_INVALIDDATA;
810  }
811 
813  if (length > 256 || !(s->hdr_state & PNG_PLTE))
814  return AVERROR_INVALIDDATA;
815 
816  for (i = 0; i < length; i++) {
817  unsigned v = bytestream2_get_byte(&s->gb);
818  s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
819  }
820  } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
821  if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
822  (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
823  s->bit_depth == 1)
824  return AVERROR_INVALIDDATA;
825 
826  for (i = 0; i < length / 2; i++) {
827  /* only use the least significant bits */
828  v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
829 
830  if (s->bit_depth > 8)
831  AV_WB16(&s->transparent_color_be[2 * i], v);
832  else
833  s->transparent_color_be[i] = v;
834  }
835  } else {
836  return AVERROR_INVALIDDATA;
837  }
838 
839  bytestream2_skip(&s->gb, 4); /* crc */
840  s->has_trns = 1;
841 
842  return 0;
843 }
844 
846 {
847  int ret, cnt = 0;
848  uint8_t *data, profile_name[82];
849  AVBPrint bp;
850  AVFrameSideData *sd;
851 
852  while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
853  if (cnt > 80) {
854  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
855  return AVERROR_INVALIDDATA;
856  }
857 
858  length = FFMAX(length - cnt, 0);
859 
860  if (bytestream2_get_byte(&s->gb) != 0) {
861  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
862  return AVERROR_INVALIDDATA;
863  }
864 
865  length = FFMAX(length - 1, 0);
866 
867  if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
868  return ret;
869 
870  ret = av_bprint_finalize(&bp, (char **)&data);
871  if (ret < 0)
872  return ret;
873 
875  if (!sd) {
876  av_free(data);
877  return AVERROR(ENOMEM);
878  }
879 
880  av_dict_set(&sd->metadata, "name", profile_name, 0);
881  memcpy(sd->data, data, bp.len);
882  av_free(data);
883 
884  /* ICC compressed data and CRC */
885  bytestream2_skip(&s->gb, length + 4);
886 
887  return 0;
888 }
889 
891 {
892  if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
893  int i, j, k;
894  uint8_t *pd = p->data[0];
895  for (j = 0; j < s->height; j++) {
896  i = s->width / 8;
897  for (k = 7; k >= 1; k--)
898  if ((s->width&7) >= k)
899  pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
900  for (i--; i >= 0; i--) {
901  pd[8*i + 7]= pd[i] & 1;
902  pd[8*i + 6]= (pd[i]>>1) & 1;
903  pd[8*i + 5]= (pd[i]>>2) & 1;
904  pd[8*i + 4]= (pd[i]>>3) & 1;
905  pd[8*i + 3]= (pd[i]>>4) & 1;
906  pd[8*i + 2]= (pd[i]>>5) & 1;
907  pd[8*i + 1]= (pd[i]>>6) & 1;
908  pd[8*i + 0]= pd[i]>>7;
909  }
910  pd += s->image_linesize;
911  }
912  } else if (s->bits_per_pixel == 2) {
913  int i, j;
914  uint8_t *pd = p->data[0];
915  for (j = 0; j < s->height; j++) {
916  i = s->width / 4;
918  if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
919  if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
920  if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
921  for (i--; i >= 0; i--) {
922  pd[4*i + 3]= pd[i] & 3;
923  pd[4*i + 2]= (pd[i]>>2) & 3;
924  pd[4*i + 1]= (pd[i]>>4) & 3;
925  pd[4*i + 0]= pd[i]>>6;
926  }
927  } else {
928  if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
929  if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
930  if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
931  for (i--; i >= 0; i--) {
932  pd[4*i + 3]= ( pd[i] & 3)*0x55;
933  pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
934  pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
935  pd[4*i + 0]= ( pd[i]>>6 )*0x55;
936  }
937  }
938  pd += s->image_linesize;
939  }
940  } else if (s->bits_per_pixel == 4) {
941  int i, j;
942  uint8_t *pd = p->data[0];
943  for (j = 0; j < s->height; j++) {
944  i = s->width/2;
946  if (s->width&1) pd[2*i+0]= pd[i]>>4;
947  for (i--; i >= 0; i--) {
948  pd[2*i + 1] = pd[i] & 15;
949  pd[2*i + 0] = pd[i] >> 4;
950  }
951  } else {
952  if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
953  for (i--; i >= 0; i--) {
954  pd[2*i + 1] = (pd[i] & 15) * 0x11;
955  pd[2*i + 0] = (pd[i] >> 4) * 0x11;
956  }
957  }
958  pd += s->image_linesize;
959  }
960  }
961 }
962 
964  uint32_t length)
965 {
966  uint32_t sequence_number;
968 
969  if (length != 26)
970  return AVERROR_INVALIDDATA;
971 
972  if (!(s->hdr_state & PNG_IHDR)) {
973  av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
974  return AVERROR_INVALIDDATA;
975  }
976 
977  s->last_w = s->cur_w;
978  s->last_h = s->cur_h;
979  s->last_x_offset = s->x_offset;
980  s->last_y_offset = s->y_offset;
981  s->last_dispose_op = s->dispose_op;
982 
983  sequence_number = bytestream2_get_be32(&s->gb);
984  cur_w = bytestream2_get_be32(&s->gb);
985  cur_h = bytestream2_get_be32(&s->gb);
986  x_offset = bytestream2_get_be32(&s->gb);
987  y_offset = bytestream2_get_be32(&s->gb);
988  bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
989  dispose_op = bytestream2_get_byte(&s->gb);
990  blend_op = bytestream2_get_byte(&s->gb);
991  bytestream2_skip(&s->gb, 4); /* crc */
992 
993  if (sequence_number == 0 &&
994  (cur_w != s->width ||
995  cur_h != s->height ||
996  x_offset != 0 ||
997  y_offset != 0) ||
998  cur_w <= 0 || cur_h <= 0 ||
999  x_offset < 0 || y_offset < 0 ||
1000  cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
1001  return AVERROR_INVALIDDATA;
1002 
1003  if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
1004  av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
1005  return AVERROR_INVALIDDATA;
1006  }
1007 
1008  if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
1009  dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
1010  // No previous frame to revert to for the first frame
1011  // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND
1012  dispose_op = APNG_DISPOSE_OP_BACKGROUND;
1013  }
1014 
1015  if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
1016  avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
1017  avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1018  avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
1019  avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
1020  avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
1021  avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
1022  )) {
1023  // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
1024  blend_op = APNG_BLEND_OP_SOURCE;
1025  }
1026 
1027  s->cur_w = cur_w;
1028  s->cur_h = cur_h;
1029  s->x_offset = x_offset;
1030  s->y_offset = y_offset;
1031  s->dispose_op = dispose_op;
1032  s->blend_op = blend_op;
1033 
1034  return 0;
1035 }
1036 
1038 {
1039  int i, j;
1040  uint8_t *pd = p->data[0];
1041  uint8_t *pd_last = s->last_picture.f->data[0];
1042  int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
1043 
1044  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1045  for (j = 0; j < s->height; j++) {
1046  for (i = 0; i < ls; i++)
1047  pd[i] += pd_last[i];
1048  pd += s->image_linesize;
1049  pd_last += s->image_linesize;
1050  }
1051 }
1052 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
// (exact for 0 <= x <= 255*255, i.e. the 8-bit x 8-bit alpha products
// it is applied to in the blend code below)
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
1056 
1058  AVFrame *p)
1059 {
1060  size_t x, y;
1061  uint8_t *buffer;
1062 
1063  if (s->blend_op == APNG_BLEND_OP_OVER &&
1064  avctx->pix_fmt != AV_PIX_FMT_RGBA &&
1065  avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
1066  avctx->pix_fmt != AV_PIX_FMT_PAL8) {
1067  avpriv_request_sample(avctx, "Blending with pixel format %s",
1068  av_get_pix_fmt_name(avctx->pix_fmt));
1069  return AVERROR_PATCHWELCOME;
1070  }
1071 
1072  buffer = av_malloc_array(s->image_linesize, s->height);
1073  if (!buffer)
1074  return AVERROR(ENOMEM);
1075 
1076 
1077  // Do the disposal operation specified by the last frame on the frame
1079  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1080  memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);
1081 
1083  for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
1084  memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
1085 
1086  memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
1088  } else {
1089  ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
1090  memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
1091  }
1092 
1093  // Perform blending
1094  if (s->blend_op == APNG_BLEND_OP_SOURCE) {
1095  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1096  size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
1097  memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
1098  }
1099  } else { // APNG_BLEND_OP_OVER
1100  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1101  uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
1102  uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
1103  for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
1104  size_t b;
1105  uint8_t foreground_alpha, background_alpha, output_alpha;
1106  uint8_t output[10];
1107 
1108  // Since we might be blending alpha onto alpha, we use the following equations:
1109  // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
1110  // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
1111 
1112  switch (avctx->pix_fmt) {
1113  case AV_PIX_FMT_RGBA:
1114  foreground_alpha = foreground[3];
1115  background_alpha = background[3];
1116  break;
1117 
1118  case AV_PIX_FMT_GRAY8A:
1119  foreground_alpha = foreground[1];
1120  background_alpha = background[1];
1121  break;
1122 
1123  case AV_PIX_FMT_PAL8:
1124  foreground_alpha = s->palette[foreground[0]] >> 24;
1125  background_alpha = s->palette[background[0]] >> 24;
1126  break;
1127  }
1128 
1129  if (foreground_alpha == 0)
1130  continue;
1131 
1132  if (foreground_alpha == 255) {
1133  memcpy(background, foreground, s->bpp);
1134  continue;
1135  }
1136 
1137  if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1138  // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
1139  avpriv_request_sample(avctx, "Alpha blending palette samples");
1140  background[0] = foreground[0];
1141  continue;
1142  }
1143 
1144  output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);
1145 
1146  av_assert0(s->bpp <= 10);
1147 
1148  for (b = 0; b < s->bpp - 1; ++b) {
1149  if (output_alpha == 0) {
1150  output[b] = 0;
1151  } else if (background_alpha == 255) {
1152  output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
1153  } else {
1154  output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
1155  }
1156  }
1157  output[b] = output_alpha;
1158  memcpy(background, output, s->bpp);
1159  }
1160  }
1161  }
1162 
1163  // Copy blended buffer into the frame and free
1164  memcpy(p->data[0], buffer, s->image_linesize * s->height);
1165  av_free(buffer);
1166 
1167  return 0;
1168 }
1169 
/* decode_frame_common(): shared PNG/APNG chunk-stream parser.
 * Walks the bytestream chunk by chunk (IHDR, PLTE, IDAT, fcTL/fdAT, ancillary
 * chunks), dispatching to per-chunk decoders, then applies post-processing
 * (small-bpp expansion, tRNS transparency, P-frame handling).
 * Returns 0 on success or a negative AVERROR on failure.
 * NOTE(review): this listing is an extraction with dropped hyperlinked lines
 * (e.g. doc-lines 1170, 1195, 1279, 1299, 1323, 1424, 1430) — the function
 * signature line and several allocations/cleanups are missing; verify
 * against upstream pngdec.c. */
1171  AVFrame *p, AVPacket *avpkt)
1172 {
1173  AVDictionary **metadatap = NULL;
1174  uint32_t tag, length;
1175  int decode_next_dat = 0; /* set once an fcTL has been seen (APNG) */
1176  int i, ret;
1177 
 /* Main chunk loop: each iteration reads one chunk header and body. */
1178  for (;;) {
1179  length = bytestream2_get_bytes_left(&s->gb);
1180  if (length <= 0) {
1181 
 /* End of input: for plain PNG with skip_frame==ALL, nothing to do. */
1182  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1183  avctx->skip_frame == AVDISCARD_ALL) {
1184  return 0;
1185  }
1186 
1187  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
1188  if (!(s->pic_state & PNG_IDAT))
1189  return 0;
1190  else
1191  goto exit_loop;
1192  }
1193  av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
 /* NOTE(review): the condition continuation (doc-line 1195) was dropped
  * by the extraction — presumably an "&& ..." clause; confirm upstream. */
1194  if ( s->pic_state & PNG_ALLIMAGE
1196  goto exit_loop;
1197  ret = AVERROR_INVALIDDATA;
1198  goto fail;
1199  }
1200 
 /* Chunk header: 32-bit big-endian length, then 4-byte tag. */
1201  length = bytestream2_get_be32(&s->gb);
1202  if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
1203  av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
1204  ret = AVERROR_INVALIDDATA;
1205  goto fail;
1206  }
1207  tag = bytestream2_get_le32(&s->gb);
1208  if (avctx->debug & FF_DEBUG_STARTCODE)
1209  av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
1210  av_fourcc2str(tag), length);
1211 
 /* When decoding is being skipped, only a whitelist of chunks is parsed. */
1212  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1213  avctx->skip_frame == AVDISCARD_ALL) {
1214  switch(tag) {
1215  case MKTAG('I', 'H', 'D', 'R'):
1216  case MKTAG('p', 'H', 'Y', 's'):
1217  case MKTAG('t', 'E', 'X', 't'):
1218  case MKTAG('I', 'D', 'A', 'T'):
1219  case MKTAG('t', 'R', 'N', 'S'):
1220  break;
1221  default:
1222  goto skip_tag;
1223  }
1224  }
1225 
1226  metadatap = &p->metadata;
1227  switch (tag) {
1228  case MKTAG('I', 'H', 'D', 'R'):
1229  if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
1230  goto fail;
1231  break;
1232  case MKTAG('p', 'H', 'Y', 's'):
1233  if ((ret = decode_phys_chunk(avctx, s)) < 0)
1234  goto fail;
1235  break;
 /* fcTL: APNG frame control — must precede the frame's fdAT/IDAT data. */
1236  case MKTAG('f', 'c', 'T', 'L'):
1237  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1238  goto skip_tag;
1239  if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
1240  goto fail;
1241  decode_next_dat = 1;
1242  break;
 /* fdAT: APNG frame data; first 4 bytes are a sequence number, then the
  * payload is handled exactly like IDAT (hence the fallthrough). */
1243  case MKTAG('f', 'd', 'A', 'T'):
1244  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1245  goto skip_tag;
1246  if (!decode_next_dat) {
1247  ret = AVERROR_INVALIDDATA;
1248  goto fail;
1249  }
1250  bytestream2_get_be32(&s->gb);
1251  length -= 4;
1252  /* fallthrough */
1253  case MKTAG('I', 'D', 'A', 'T'):
1254  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
1255  goto skip_tag;
1256  if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
1257  goto fail;
1258  break;
1259  case MKTAG('P', 'L', 'T', 'E'):
1260  if (decode_plte_chunk(avctx, s, length) < 0)
1261  goto skip_tag;
1262  break;
1263  case MKTAG('t', 'R', 'N', 'S'):
1264  if (decode_trns_chunk(avctx, s, length) < 0)
1265  goto skip_tag;
1266  break;
 /* tEXt/zTXt: textual metadata, stored into the frame's metadata dict;
  * failures are non-fatal (warning only). Skip includes the 4-byte CRC. */
1267  case MKTAG('t', 'E', 'X', 't'):
1268  if (decode_text_chunk(s, length, 0, metadatap) < 0)
1269  av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
1270  bytestream2_skip(&s->gb, length + 4);
1271  break;
1272  case MKTAG('z', 'T', 'X', 't'):
1273  if (decode_text_chunk(s, length, 1, metadatap) < 0)
1274  av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
1275  bytestream2_skip(&s->gb, length + 4);
1276  break;
 /* sTER: stereo 3D indicator.
  * NOTE(review): the allocation of `stereo3d` (doc-line 1279, presumably
  * av_stereo3d_create_side_data(p)) was dropped by the extraction. */
1277  case MKTAG('s', 'T', 'E', 'R'): {
1278  int mode = bytestream2_get_byte(&s->gb);
1280  if (!stereo3d)
1281  goto fail;
1282 
1283  if (mode == 0 || mode == 1) {
1284  stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1285  stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
1286  } else {
1287  av_log(avctx, AV_LOG_WARNING,
1288  "Unknown value in sTER chunk (%d)\n", mode);
1289  }
1290  bytestream2_skip(&s->gb, 4); /* crc */
1291  break;
1292  }
1293  case MKTAG('i', 'C', 'C', 'P'): {
1294  if (decode_iccp_chunk(s, length, p) < 0)
1295  goto fail;
1296  break;
1297  }
 /* cHRM: chromaticity coordinates, stored as mastering display metadata.
  * Values are fixed-point with 100000 as the denominator.
  * NOTE(review): the allocation of `mdm` (doc-line 1299) was dropped by
  * the extraction. */
1298  case MKTAG('c', 'H', 'R', 'M'): {
1300  if (!mdm) {
1301  ret = AVERROR(ENOMEM);
1302  goto fail;
1303  }
1304 
1305  mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1306  mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1307 
1308  /* RGB Primaries */
1309  for (i = 0; i < 3; i++) {
1310  mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1311  mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1312  }
1313 
1314  mdm->has_primaries = 1;
1315  bytestream2_skip(&s->gb, 4); /* crc */
1316  break;
1317  }
 /* gAMA: image gamma, exported as a "gamma" metadata string "num/100000".
  * NOTE(review): the av_bprint_init() call (doc-line 1323) was dropped by
  * the extraction. */
1318  case MKTAG('g', 'A', 'M', 'A'): {
1319  AVBPrint bp;
1320  char *gamma_str;
1321  int num = bytestream2_get_be32(&s->gb);
1322 
1324  av_bprintf(&bp, "%i/%i", num, 100000);
1325  ret = av_bprint_finalize(&bp, &gamma_str);
1326  if (ret < 0)
1327  return ret;
1328 
1329  av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);
1330 
1331  bytestream2_skip(&s->gb, 4); /* crc */
1332  break;
1333  }
1334  case MKTAG('I', 'E', 'N', 'D'):
1335  if (!(s->pic_state & PNG_ALLIMAGE))
1336  av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
1337  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1338  ret = AVERROR_INVALIDDATA;
1339  goto fail;
1340  }
1341  bytestream2_skip(&s->gb, 4); /* crc */
1342  goto exit_loop;
1343  default:
1344  /* skip tag */
1345 skip_tag:
1346  bytestream2_skip(&s->gb, length + 4);
1347  break;
1348  }
1349  }
1350 exit_loop:
1351 
1352  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1353  avctx->skip_frame == AVDISCARD_ALL) {
1354  return 0;
1355  }
1356 
 /* Expand 1/2/4-bit packed rows to one byte per pixel. */
1357  if (s->bits_per_pixel <= 4)
1358  handle_small_bpp(s, p);
1359 
1360  /* apply transparency if needed */
1361  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
1362  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
1363  size_t raw_bpp = s->bpp - byte_depth; /* bytes per pixel before the added alpha */
1364  unsigned x, y;
1365 
1366  av_assert0(s->bit_depth > 1);
1367 
1368  for (y = 0; y < s->height; ++y) {
1369  uint8_t *row = &s->image_buf[s->image_linesize * y];
1370 
 /* Fast paths for gray8->gray8a and rgb24->rgba; a transparent-color
  * match yields alpha 0, everything else 0xff. */
1371  if (s->bpp == 2 && byte_depth == 1) {
1372  uint8_t *pixel = &row[2 * s->width - 1];
1373  uint8_t *rowp = &row[1 * s->width - 1];
1374  int tcolor = s->transparent_color_be[0];
1375  for (x = s->width; x > 0; --x) {
1376  *pixel-- = *rowp == tcolor ? 0 : 0xff;
1377  *pixel-- = *rowp--;
1378  }
1379  } else if (s->bpp == 4 && byte_depth == 1) {
1380  uint8_t *pixel = &row[4 * s->width - 1];
1381  uint8_t *rowp = &row[3 * s->width - 1];
1382  int tcolor = AV_RL24(s->transparent_color_be);
1383  for (x = s->width; x > 0; --x) {
1384  *pixel-- = AV_RL24(rowp-2) == tcolor ? 0 : 0xff;
1385  *pixel-- = *rowp--;
1386  *pixel-- = *rowp--;
1387  *pixel-- = *rowp--;
1388  }
1389  } else {
1390  /* since we're updating in-place, we have to go from right to left */
1391  for (x = s->width; x > 0; --x) {
1392  uint8_t *pixel = &row[s->bpp * (x - 1)];
1393  memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
1394 
1395  if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
1396  memset(&pixel[raw_bpp], 0, byte_depth);
1397  } else {
1398  memset(&pixel[raw_bpp], 0xff, byte_depth);
1399  }
1400  }
1401  }
1402  }
1403  }
1404 
1405  /* handle P-frames only if a predecessor frame is available */
1406  if (s->last_picture.f->data[0]) {
1407  if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
1408  && s->last_picture.f->width == p->width
1409  && s->last_picture.f->height== p->height
1410  && s->last_picture.f->format== p->format
1411  ) {
1412  if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
1413  handle_p_frame_png(s, p);
1414  else if (CONFIG_APNG_DECODER &&
1415  s->previous_picture.f->width == p->width &&
1416  s->previous_picture.f->height== p->height &&
1417  s->previous_picture.f->format== p->format &&
1418  avctx->codec_id == AV_CODEC_ID_APNG &&
1419  (ret = handle_p_frame_apng(avctx, s, p)) < 0)
1420  goto fail;
1421  }
1422  }
 /* NOTE(review): doc-lines 1424 and 1430 were dropped by the extraction;
  * upstream likely performs additional cleanup/progress reporting here. */
1423  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1425 
1426  return 0;
1427 
1428 fail:
1429  ff_thread_report_progress(&s->picture, INT_MAX, 0);
1431  return ret;
1432 }
1433 
1434 #if CONFIG_PNG_DECODER
/* decode_frame_png(): AVCodec.decode entry point for still PNG.
 * Validates the PNG/MNG signature, initializes zlib, then delegates chunk
 * parsing to decode_frame_common(). On success returns the number of bytes
 * consumed; on failure a negative AVERROR.
 * NOTE(review): doc-lines 1446-1447 were dropped by the extraction
 * (upstream does last-picture bookkeeping before `p = s->picture.f;`). */
1435 static int decode_frame_png(AVCodecContext *avctx,
1436  void *data, int *got_frame,
1437  AVPacket *avpkt)
1438 {
1439  PNGDecContext *const s = avctx->priv_data;
1440  const uint8_t *buf = avpkt->data;
1441  int buf_size = avpkt->size;
1442  AVFrame *p;
1443  int64_t sig;
1444  int ret;
1445 
1448  p = s->picture.f;
1449 
1450  bytestream2_init(&s->gb, buf, buf_size);
1451 
1452  /* check signature */
1453  sig = bytestream2_get_be64(&s->gb);
1454  if (sig != PNGSIG &&
1455  sig != MNGSIG) {
1456  av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
1457  return AVERROR_INVALIDDATA;
1458  }
1459 
 /* Reset per-frame parser state. */
1460  s->y = s->has_trns = 0;
1461  s->hdr_state = 0;
1462  s->pic_state = 0;
1463 
1464  /* init the zlib */
1465  s->zstream.zalloc = ff_png_zalloc;
1466  s->zstream.zfree = ff_png_zfree;
1467  s->zstream.opaque = NULL;
1468  ret = inflateInit(&s->zstream);
1469  if (ret != Z_OK) {
1470  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1471  return AVERROR_EXTERNAL;
1472  }
1473 
1474  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1475  goto the_end;
1476 
 /* skip_frame==ALL: chunks were parsed for metadata but no frame is output. */
1477  if (avctx->skip_frame == AVDISCARD_ALL) {
1478  *got_frame = 0;
1479  ret = bytestream2_tell(&s->gb);
1480  goto the_end;
1481  }
1482 
1483  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1484  goto the_end;
1485 
1486  *got_frame = 1;
1487 
1488  ret = bytestream2_tell(&s->gb);
1489 the_end:
 /* Tear down zlib state; crow_buf points into s->buffer, so just clear it. */
1490  inflateEnd(&s->zstream);
1491  s->crow_buf = NULL;
1492  return ret;
1493 }
1494 #endif
1495 
1496 #if CONFIG_APNG_DECODER
/* decode_frame_apng(): AVCodec.decode entry point for APNG.
 * On the first call (no IHDR seen yet) the stream header is parsed from
 * extradata; every packet then gets a fresh zlib context and is parsed by
 * decode_frame_common(). Returns bytes consumed or a negative AVERROR.
 * NOTE(review): doc-lines 1505-1506 were dropped by the extraction
 * (upstream does picture bookkeeping before `p = s->picture.f;`). */
1497 static int decode_frame_apng(AVCodecContext *avctx,
1498  void *data, int *got_frame,
1499  AVPacket *avpkt)
1500 {
1501  PNGDecContext *const s = avctx->priv_data;
1502  int ret;
1503  AVFrame *p;
1504 
1507  p = s->picture.f;
1508 
 /* First frame: the PNG header chunks live in extradata, not the packet. */
1509  if (!(s->hdr_state & PNG_IHDR)) {
1510  if (!avctx->extradata_size)
1511  return AVERROR_INVALIDDATA;
1512 
1513  /* only init fields, there is no zlib use in extradata */
1514  s->zstream.zalloc = ff_png_zalloc;
1515  s->zstream.zfree = ff_png_zfree;
1516 
1517  bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
1518  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1519  goto end;
1520  }
1521 
1522  /* reset state for a new frame */
1523  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1524  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1525  ret = AVERROR_EXTERNAL;
1526  goto end;
1527  }
1528  s->y = 0;
1529  s->pic_state = 0;
1530  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1531  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1532  goto end;
1533 
 /* An incomplete image is tolerated with a warning as long as at least
  * some IDAT data was decoded; a frame with neither is an error. */
1534  if (!(s->pic_state & PNG_ALLIMAGE))
1535  av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
1536  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1537  ret = AVERROR_INVALIDDATA;
1538  goto end;
1539  }
1540  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1541  goto end;
1542 
1543  *got_frame = 1;
1544  ret = bytestream2_tell(&s->gb);
1545 
1546 end:
1547  inflateEnd(&s->zstream);
1548  return ret;
1549 }
1550 #endif
1551 
1552 #if CONFIG_LSCR_DECODER
/* decode_frame_lscr(): AVCodec.decode entry point for LEAD Screen Capture.
 * An LSCR packet is a list of rectangular blocks, each carrying its own
 * zlib-compressed PNG IDAT stream which is decoded into the target rect of
 * the (bottom-up, BGR24) frame. Returns the packet size or a negative
 * AVERROR.
 * NOTE(review): doc-lines 1635, 1668 and 1670 were dropped by the
 * extraction (the last_row allocation call, the pict_type assignment and a
 * last_picture unref, presumably) — verify against upstream. */
1553 static int decode_frame_lscr(AVCodecContext *avctx,
1554  void *data, int *got_frame,
1555  AVPacket *avpkt)
1556 {
1557  PNGDecContext *const s = avctx->priv_data;
1558  GetByteContext *gb = &s->gb;
1559  AVFrame *frame = data;
1560  int ret, nb_blocks, offset = 0;
1561 
1562  if (avpkt->size < 2)
1563  return AVERROR_INVALIDDATA;
1564 
1565  bytestream2_init(gb, avpkt->data, avpkt->size);
1566 
1567  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
1568  return ret;
1569 
 /* Header: 16-bit block count, then per-block 12-byte rect descriptors. */
1570  nb_blocks = bytestream2_get_le16(gb);
1571  if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * (12 + 8))
1572  return AVERROR_INVALIDDATA;
1573 
 /* Start from the previous frame so untouched regions persist (P-frame). */
1574  if (s->last_picture.f->data[0]) {
1575  ret = av_frame_copy(frame, s->last_picture.f);
1576  if (ret < 0)
1577  return ret;
1578  }
1579 
1580  for (int b = 0; b < nb_blocks; b++) {
1581  int x, y, x2, y2, w, h, left;
1582  uint32_t csize, size;
1583 
 /* Each block has its own independent zlib stream. */
1584  s->zstream.zalloc = ff_png_zalloc;
1585  s->zstream.zfree = ff_png_zfree;
1586  s->zstream.opaque = NULL;
1587 
1588  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1589  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1590  ret = AVERROR_EXTERNAL;
1591  goto end;
1592  }
1593 
 /* Seek to this block's rect descriptor in the header area. */
1594  bytestream2_seek(gb, 2 + b * 12, SEEK_SET);
1595 
1596  x = bytestream2_get_le16(gb);
1597  y = bytestream2_get_le16(gb);
1598  x2 = bytestream2_get_le16(gb);
1599  y2 = bytestream2_get_le16(gb);
1600  s->width = s->cur_w = w = x2-x;
1601  s->height = s->cur_h = h = y2-y;
1602 
 /* Reject rects that fall outside the frame. */
1603  if (w <= 0 || x < 0 || x >= avctx->width || w + x > avctx->width ||
1604  h <= 0 || y < 0 || y >= avctx->height || h + y > avctx->height) {
1605  ret = AVERROR_INVALIDDATA;
1606  goto end;
1607  }
1608 
1609  size = bytestream2_get_le32(gb);
1610 
 /* A single full-frame block at the origin is a keyframe. */
1611  frame->key_frame = (nb_blocks == 1) &&
1612  (w == avctx->width) &&
1613  (h == avctx->height) &&
1614  (x == 0) && (y == 0);
1615 
 /* Jump to the block's payload: a BE32 chunk size + "IDAT" tag. */
1616  bytestream2_seek(gb, 2 + nb_blocks * 12 + offset, SEEK_SET);
1617  csize = bytestream2_get_be32(gb);
1618  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1619  ret = AVERROR_INVALIDDATA;
1620  goto end;
1621  }
1622 
1623  offset += size;
1624  left = size;
1625 
1626  s->y = 0;
1627  s->row_size = w * 3; /* BGR24: 3 bytes per pixel */
1628 
1629  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
1630  if (!s->buffer) {
1631  ret = AVERROR(ENOMEM);
1632  goto end;
1633  }
1634 
 /* NOTE(review): the allocation feeding this check (doc-line 1635,
  * presumably av_fast_padded_malloc for s->last_row) was dropped. */
1636  if (!s->last_row) {
1637  ret = AVERROR(ENOMEM);
1638  goto end;
1639  }
1640 
 /* Point the shared PNG row decoder at the target rect; negative
  * linesize because LSCR frames are stored bottom-up. */
1641  s->crow_size = w * 3 + 1;
1642  s->crow_buf = s->buffer + 15;
1643  s->zstream.avail_out = s->crow_size;
1644  s->zstream.next_out = s->crow_buf;
1645  s->image_buf = frame->data[0] + (avctx->height - y - 1) * frame->linesize[0] + x * 3;
1646  s->image_linesize =-frame->linesize[0];
1647  s->bpp = 3;
1648  s->pic_state = 0;
1649 
 /* Consume consecutive IDAT chunks until the block's data is used up;
  * each chunk is followed by a 4-byte CRC (hence the +16 bookkeeping). */
1650  while (left > 16) {
1651  ret = png_decode_idat(s, csize);
1652  if (ret < 0)
1653  goto end;
1654  left -= csize + 16;
1655  if (left > 16) {
1656  bytestream2_skip(gb, 4);
1657  csize = bytestream2_get_be32(gb);
1658  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1659  ret = AVERROR_INVALIDDATA;
1660  goto end;
1661  }
1662  }
1663  }
1664 
1665  inflateEnd(&s->zstream);
1666  }
1667 
1669 
 /* Keep a reference for the next packet's P-frame copy. */
1671  if ((ret = av_frame_ref(s->last_picture.f, frame)) < 0)
1672  return ret;
1673 
1674  *got_frame = 1;
1675 end:
1676  inflateEnd(&s->zstream);
1677 
1678  if (ret < 0)
1679  return ret;
1680  return avpkt->size;
1681 }
1682 
/* decode_flush(): AVCodec.flush callback for the LSCR decoder.
 * NOTE(review): the body statement (doc-line 1687, presumably an unref of
 * s->last_picture.f to drop inter-frame state) was dropped by the
 * extraction — verify against upstream. */
1683 static void decode_flush(AVCodecContext *avctx)
1684 {
1685  PNGDecContext *s = avctx->priv_data;
1686 
1688 }
1689 
1690 #endif
1691 
1692 #if HAVE_THREADS
/* update_thread_context(): frame-threading state transfer (dst <- src).
 * Re-references the source thread's picture and, for APNG, copies all
 * inter-frame decoding state (geometry, header fields, palette, dispose
 * op). Returns 0 on success, a negative AVERROR on reference failure.
 * NOTE(review): doc-lines 1727 and 1732 were dropped by the extraction
 * (presumably ff_thread_release_buffer calls for last/previous picture
 * before re-referencing them) — verify against upstream. */
1693 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1694 {
1695  PNGDecContext *psrc = src->priv_data;
1696  PNGDecContext *pdst = dst->priv_data;
1697  int ret;
1698 
1699  if (dst == src)
1700  return 0;
1701 
1702  ff_thread_release_buffer(dst, &pdst->picture);
1703  if (psrc->picture.f->data[0] &&
1704  (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
1705  return ret;
 /* APNG frames depend on each other; copy the full inter-frame state. */
1706  if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
1707  pdst->width = psrc->width;
1708  pdst->height = psrc->height;
1709  pdst->bit_depth = psrc->bit_depth;
1710  pdst->color_type = psrc->color_type;
1711  pdst->compression_type = psrc->compression_type;
1712  pdst->interlace_type = psrc->interlace_type;
1713  pdst->filter_type = psrc->filter_type;
1714  pdst->cur_w = psrc->cur_w;
1715  pdst->cur_h = psrc->cur_h;
1716  pdst->x_offset = psrc->x_offset;
1717  pdst->y_offset = psrc->y_offset;
1718  pdst->has_trns = psrc->has_trns;
1719  memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
1720 
1721  pdst->dispose_op = psrc->dispose_op;
1722 
1723  memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
1724 
 /* Accumulate header state: once IHDR/PLTE are seen they stay seen. */
1725  pdst->hdr_state |= psrc->hdr_state;
1726 
1728  if (psrc->last_picture.f->data[0] &&
1729  (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
1730  return ret;
1731 
1733  if (psrc->previous_picture.f->data[0] &&
1734  (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
1735  return ret;
1736  }
1737 
1738  return 0;
1739 }
1740 #endif
1741 
/* png_dec_init(): shared init for the PNG/APNG/LSCR decoders.
 * Allocates the three AVFrames used for inter-frame state and initializes
 * the DSP context once per codec instance (not per thread copy).
 * NOTE(review): the signature line (doc-line 1742, presumably
 * `static av_cold int png_dec_init(AVCodecContext *avctx)`), the
 * previous_picture allocation (1752) and two av_frame_free calls
 * (1756-1757) were dropped by the extraction — verify against upstream. */
1743 {
1744  PNGDecContext *s = avctx->priv_data;
1745 
1746  avctx->color_range = AVCOL_RANGE_JPEG;
1747 
1748  if (avctx->codec_id == AV_CODEC_ID_LSCR)
1749  avctx->pix_fmt = AV_PIX_FMT_BGR24;
1750 
1751  s->avctx = avctx;
1753  s->last_picture.f = av_frame_alloc();
1754  s->picture.f = av_frame_alloc();
1755  if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
1758  av_frame_free(&s->picture.f);
1759  return AVERROR(ENOMEM);
1760  }
1761 
1762  if (!avctx->internal->is_copy) {
1763  avctx->internal->allocate_progress = 1;
1764  ff_pngdsp_init(&s->dsp);
1765  }
1766 
1767  return 0;
1768 }
1769 
/* png_dec_end(): shared close for the PNG/APNG/LSCR decoders.
 * Releases picture buffers and frees all scratch row buffers.
 * NOTE(review): the signature line (doc-line 1770) and the cleanup of
 * previous_picture/last_picture (doc-lines 1774-1777) were dropped by the
 * extraction — verify against upstream. */
1771 {
1772  PNGDecContext *s = avctx->priv_data;
1773 
1778  ff_thread_release_buffer(avctx, &s->picture);
1779  av_frame_free(&s->picture.f);
1780  av_freep(&s->buffer);
1781  s->buffer_size = 0;
1782  av_freep(&s->last_row);
1783  s->last_row_size = 0;
1784  av_freep(&s->tmp_row);
1785  s->tmp_row_size = 0;
1786 
1787  return 0;
1788 }
1789 
1790 #if CONFIG_APNG_DECODER
/* AVCodec registration table for the APNG decoder.
 * NOTE(review): the opening line (doc-line 1791, presumably
 * `AVCodec ff_apng_decoder = {`) and the .init_thread_copy member
 * (doc-line 1800) were dropped by the extraction. */
1792  .name = "apng",
1793  .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
1794  .type = AVMEDIA_TYPE_VIDEO,
1795  .id = AV_CODEC_ID_APNG,
1796  .priv_data_size = sizeof(PNGDecContext),
1797  .init = png_dec_init,
1798  .close = png_dec_end,
1799  .decode = decode_frame_apng,
1801  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1802  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1803  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1804 };
1805 #endif
1806 
1807 #if CONFIG_PNG_DECODER
/* AVCodec registration table for the still-PNG decoder.
 * NOTE(review): the opening line (doc-line 1808, presumably
 * `AVCodec ff_png_decoder = {`) plus the .init_thread_copy and
 * .caps_internal members (doc-lines 1817, 1820) were dropped by the
 * extraction. */
1809  .name = "png",
1810  .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
1811  .type = AVMEDIA_TYPE_VIDEO,
1812  .id = AV_CODEC_ID_PNG,
1813  .priv_data_size = sizeof(PNGDecContext),
1814  .init = png_dec_init,
1815  .close = png_dec_end,
1816  .decode = decode_frame_png,
1818  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1819  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1821 };
1822 #endif
1823 
1824 #if CONFIG_LSCR_DECODER
/* AVCodec registration table for the LEAD Screen Capture decoder.
 * NOTE(review): the opening line (doc-line 1825, presumably
 * `AVCodec ff_lscr_decoder = {`) and the .caps_internal member
 * (doc-line 1836) were dropped by the extraction. */
1826  .name = "lscr",
1827  .long_name = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
1828  .type = AVMEDIA_TYPE_VIDEO,
1829  .id = AV_CODEC_ID_LSCR,
1830  .priv_data_size = sizeof(PNGDecContext),
1831  .init = png_dec_init,
1832  .close = png_dec_end,
1833  .decode = decode_frame_lscr,
1834  .flush = decode_flush,
1835  .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1837 };
1838 #endif
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length, AVFrame *p)
Definition: pngdec.c:619
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:963
#define PNG_FILTER_VALUE_AVG
Definition: png.h:41
static void png_handle_row(PNGDecContext *s)
Definition: pngdec.c:324
ThreadFrame previous_picture
Definition: pngdec.c:56
#define NULL
Definition: coverity.c:32
int last_y_offset
Definition: pngdec.c:66
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane...
Definition: imgutils.c:76
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int width
Definition: pngdec.c:62
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
unsigned int tmp_row_size
Definition: pngdec.c:87
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVFrame * f
Definition: thread.h:35
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
const char * g
Definition: vf_curves.c:115
int pass_row_size
Definition: pngdec.c:93
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
AVDictionary * metadata
Definition: frame.h:205
uint8_t * tmp_row
Definition: pngdec.c:86
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
#define avpriv_request_sample(...)
PNGHeaderState
Definition: pngdec.c:41
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2203
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
int num
Numerator.
Definition: rational.h:59
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed, AVDictionary **dict)
Definition: pngdec.c:503
int size
Definition: avcodec.h:1481
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1947
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
enum PNGImageState pic_state
Definition: pngdec.c:61
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
discard all
Definition: avcodec.h:814
Views are next to each other.
Definition: stereo3d.h:67
#define PNG_COLOR_TYPE_RGB
Definition: png.h:33
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
Definition: pngdsp.h:28
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
#define PNG_COLOR_TYPE_GRAY_ALPHA
Definition: png.h:35
#define src
Definition: vp8dsp.c:254
AVCodec.
Definition: avcodec.h:3492
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
#define PNG_COLOR_TYPE_PALETTE
Definition: png.h:32
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int filter_type
Definition: pngdec.c:73
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdec.c:186
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that&#39;s been allocated with av_malloc() or another memory allocation function...
Definition: dict.h:73
#define PNG_FILTER_VALUE_PAETH
Definition: png.h:42
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3043
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
int y_offset
Definition: pngdec.c:65
uint8_t
#define av_cold
Definition: attributes.h:82
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.If the codec allocates writable tables in its init()
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define PNG_COLOR_TYPE_RGB_ALPHA
Definition: png.h:34
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2654
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Multithreading support functions.
AVCodec ff_apng_decoder
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1669
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
Definition: pngdec.c:603
Structure to hold side data for an AVFrame.
Definition: frame.h:201
uint8_t * data
Definition: avcodec.h:1480
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1531
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1821
#define ff_dlog(a,...)
AVDictionary * metadata
metadata.
Definition: frame.h:581
static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
Definition: pngdec.c:845
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
AVCodec ff_lscr_decoder
ptrdiff_t size
Definition: opengl_enc.c:100
unsigned int last_row_size
Definition: pngdec.c:85
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
int cur_h
Definition: pngdec.c:63
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1512
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:774
#define U(x)
Definition: vp56_arith.h:37
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const uint8_t png_pass_dsp_mask[NB_PASSES]
Definition: pngdec.c:109
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:212
#define AV_BPRINT_SIZE_UNLIMITED
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt)
Definition: pngdec.c:1170
static const uint16_t mask[17]
Definition: lzw.c:38
#define OP_SUB(x, s, l)
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:136
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1037
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
uint8_t * crow_buf
Definition: pngdec.c:83
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int pass
Definition: pngdec.c:90
int ff_png_get_nb_channels(int color_type)
Definition: png.c:49
ThreadFrame picture
Definition: pngdec.c:58
int height
Definition: pngdec.c:62
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
#define PNGSIG
Definition: png.h:47
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3499
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
int bits_per_pixel
Definition: pngdec.c:75
GetByteContext gb
Definition: pngdec.c:55
#define FFMAX(a, b)
Definition: common.h:94
#define NB_PASSES
Definition: png.h:45
#define fail()
Definition: checkasm.h:122
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1040
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
uint8_t blend_op
Definition: pngdec.c:67
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1486
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
z_stream zstream
Definition: pngdec.c:95
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define b
Definition: input.c:41
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:146
#define FFMIN(a, b)
Definition: common.h:96
#define PNG_FILTER_VALUE_SUB
Definition: png.h:39
uint32_t palette[256]
Definition: pngdec.c:82
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define PNG_COLOR_TYPE_GRAY
Definition: png.h:31
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
Definition: pngdec.c:252
int width
picture width / height.
Definition: avcodec.h:1741
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
uint8_t w
Definition: llviddspenc.c:38
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
uint8_t * last_row
Definition: pngdec.c:84
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int n
Definition: avisynth_c.h:760
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AVCodecContext * avctx
Definition: pngdec.c:53
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
Definition: bprint.c:218
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
Definition: pngdsp.c:43
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end)
Definition: pngdec.c:435
static void error(const char *err)
int channels
Definition: pngdec.c:74
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
if(ret)
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:551
static uint8_t * iso88591_to_utf8(const uint8_t *in, size_t size_in)
Definition: pngdec.c:479
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
static av_cold int png_dec_init(AVCodecContext *avctx)
Definition: pngdec.c:1742
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
Libavcodec external API header.
enum PNGHeaderState hdr_state
Definition: pngdec.c:60
int buffer_size
Definition: pngdec.c:89
static int skip_tag(AVIOContext *in, int32_t tag_name)
Definition: ismindex.c:132
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with ff_thread_release_buffer().Otherwise leave it at zero and decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
enum AVCodecID codec_id
Definition: avcodec.h:1578
#define PNG_FILTER_VALUE_UP
Definition: png.h:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
#define PNG_FILTER_TYPE_LOCO
Definition: png.h:37
uint8_t last_dispose_op
Definition: pngdec.c:68
#define abs(x)
Definition: cuda_runtime.h:35
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
int debug
debug
Definition: avcodec.h:2653
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1568
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1593
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
uint8_t * data
Definition: frame.h:203
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
int interlace_type
Definition: pngdec.c:72
PNGImageState
Definition: pngdec.c:46
void * buf
Definition: avisynth_c.h:766
const uint8_t ff_png_pass_ymask[NB_PASSES]
Definition: png.c:25
int image_linesize
Definition: pngdec.c:81
int extradata_size
Definition: avcodec.h:1670
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2634
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:197
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Mastering display metadata capable of representing the color volume of the display used to master the...
int cur_w
Definition: pngdec.c:63
uint8_t transparent_color_be[6]
Definition: pngdec.c:78
#define OP_AVG(x, s, l)
uint8_t * image_buf
Definition: pngdec.c:80
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
uint8_t dispose_op
Definition: pngdec.c:67
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
uint8_t pixel
Definition: tiny_ssim.c:42
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int last_x_offset
Definition: pngdec.c:66
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
#define FAST_DIV255(x)
Definition: pngdec.c:1055
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1057
#define YUV2RGB(NAME, TYPE)
Definition: pngdec.c:309
static const uint8_t png_pass_mask[NB_PASSES]
Definition: pngdec.c:99
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:76
Y , 8bpp.
Definition: pixfmt.h:74
static av_cold int png_dec_end(AVCodecContext *avctx)
Definition: pngdec.c:1770
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdsp.h:33
common internal api header.
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:890
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
#define PNG_FILTER_VALUE_NONE
Definition: png.h:38
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:797
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
int last_w
Definition: pngdec.c:64
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call...
Definition: utils.c:82
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
Definition: pngdec.c:104
Stereoscopic video.
int den
Denominator.
Definition: rational.h:60
void ff_png_zfree(void *opaque, void *ptr)
Definition: png.c:44
void * priv_data
Definition: avcodec.h:1595
static int png_decode_idat(PNGDecContext *s, int length)
Definition: pngdec.c:405
uint8_t * buffer
Definition: pngdec.c:88
#define av_free(p)
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2667
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1603
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
int row_size
Definition: pngdec.c:92
APNG common header.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
PNGDSPContext dsp
Definition: pngdec.c:52
int compression_type
Definition: pngdec.c:71
int last_h
Definition: pngdec.c:64
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
Definition: png.c:62
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
int bit_depth
Definition: pngdec.c:69
#define av_freep(p)
int color_type
Definition: pngdec.c:70
ThreadFrame last_picture
Definition: pngdec.c:57
#define av_malloc_array(a, b)
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
Definition: pngdec.c:116
#define FFSWAP(type, a, b)
Definition: common.h:99
int crow_size
Definition: pngdec.c:91
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
static void decode_flush(AVCodecContext *avctx)
Definition: agm.c:1253
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int x_offset
Definition: pngdec.c:65
#define MKTAG(a, b, c, d)
Definition: common.h:366
void * ff_png_zalloc(void *opaque, unsigned int items, unsigned int size)
Definition: png.c:39
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1457
int has_trns
Definition: pngdec.c:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1179
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:984
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2631
AVCodec ff_png_decoder
Predicted.
Definition: avutil.h:275
#define UNROLL_FILTER(op)
Definition: pngdec.c:237
#define MNGSIG
Definition: png.h:48