/* FFmpeg — webp.c (source listing rendered by Doxygen) */
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 #include "libavutil/mem.h"
45 
46 #define BITSTREAM_READER_LE
47 #include "avcodec.h"
48 #include "bytestream.h"
49 #include "codec_internal.h"
50 #include "decode.h"
51 #include "exif_internal.h"
52 #include "get_bits.h"
53 #include "thread.h"
54 #include "tiff_common.h"
55 #include "vp8.h"
56 
57 #define VP8X_FLAG_ANIMATION 0x02
58 #define VP8X_FLAG_XMP_METADATA 0x04
59 #define VP8X_FLAG_EXIF_METADATA 0x08
60 #define VP8X_FLAG_ALPHA 0x10
61 #define VP8X_FLAG_ICC 0x20
62 
63 #define MAX_PALETTE_SIZE 256
64 #define MAX_CACHE_BITS 11
65 #define NUM_CODE_LENGTH_CODES 19
66 #define HUFFMAN_CODES_PER_META_CODE 5
67 #define NUM_LITERAL_CODES 256
68 #define NUM_LENGTH_CODES 24
69 #define NUM_DISTANCE_CODES 40
70 #define NUM_SHORT_DISTANCES 120
71 #define MAX_HUFFMAN_CODE_LENGTH 15
72 
73 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
77 };
78 
80  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
81 };
82 
/* Mapping for LZ77 distance codes 1..120: instead of a linear distance, these
 * codes address a pixel in a 2-D neighborhood of the current pixel. Entry
 * [code - 1] is the { x, y } offset pair (WebP lossless spec, "distance
 * mapping"); the decoder converts it back to a linear distance with
 * x + y * width. */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    {  0, 1 }, {  1, 0 }, {  1, 1 }, { -1, 1 }, {  0, 2 }, {  2, 0 }, {  1, 2 }, { -1, 2 },
    {  2, 1 }, { -2, 1 }, {  2, 2 }, { -2, 2 }, {  0, 3 }, {  3, 0 }, {  1, 3 }, { -1, 3 },
    {  3, 1 }, { -3, 1 }, {  2, 3 }, { -2, 3 }, {  3, 2 }, { -3, 2 }, {  0, 4 }, {  4, 0 },
    {  1, 4 }, { -1, 4 }, {  4, 1 }, { -4, 1 }, {  3, 3 }, { -3, 3 }, {  2, 4 }, { -2, 4 },
    {  4, 2 }, { -4, 2 }, {  0, 5 }, {  3, 4 }, { -3, 4 }, {  4, 3 }, { -4, 3 }, {  5, 0 },
    {  1, 5 }, { -1, 5 }, {  5, 1 }, { -5, 1 }, {  2, 5 }, { -2, 5 }, {  5, 2 }, { -5, 2 },
    {  4, 4 }, { -4, 4 }, {  3, 5 }, { -3, 5 }, {  5, 3 }, { -5, 3 }, {  0, 6 }, {  6, 0 },
    {  1, 6 }, { -1, 6 }, {  6, 1 }, { -6, 1 }, {  2, 6 }, { -2, 6 }, {  6, 2 }, { -6, 2 },
    {  4, 5 }, { -4, 5 }, {  5, 4 }, { -5, 4 }, {  3, 6 }, { -3, 6 }, {  6, 3 }, { -6, 3 },
    {  0, 7 }, {  7, 0 }, {  1, 7 }, { -1, 7 }, {  5, 5 }, { -5, 5 }, {  7, 1 }, { -7, 1 },
    {  4, 6 }, { -4, 6 }, {  6, 4 }, { -6, 4 }, {  2, 7 }, { -2, 7 }, {  7, 2 }, { -7, 2 },
    {  3, 7 }, { -3, 7 }, {  7, 3 }, { -7, 3 }, {  5, 6 }, { -5, 6 }, {  6, 5 }, { -6, 5 },
    {  8, 0 }, {  4, 7 }, { -4, 7 }, {  7, 4 }, { -7, 4 }, {  8, 1 }, {  8, 2 }, {  6, 6 },
    { -6, 6 }, {  8, 3 }, {  5, 7 }, { -5, 7 }, {  7, 5 }, { -7, 5 }, {  8, 4 }, {  6, 7 },
    { -6, 7 }, {  7, 6 }, { -7, 6 }, {  8, 5 }, {  7, 7 }, { -7, 7 }, {  8, 6 }, {  8, 7 }
};
100 
104 };
105 
111 };
112 
118 };
119 
135 };
136 
143 };
144 
145 /* The structure of WebP lossless is an optional series of transformation data,
146  * followed by the primary image. The primary image also optionally contains
147  * an entropy group mapping if there are multiple entropy groups. There is a
148  * basic image type called an "entropy coded image" that is used for all of
149  * these. The type of each entropy coded image is referred to by the
150  * specification as its role. */
enum ImageRole {
    /* Primary Image: Stores the actual pixels of the image. */
    IMAGE_ROLE_ARGB,

    /* Entropy Image: Defines which Huffman group to use for different areas of
     * the primary image. */
    IMAGE_ROLE_ENTROPY,

    /* Predictors: Defines which predictor type to use for different areas of
     * the primary image. */
    IMAGE_ROLE_PREDICTOR,

    /* Color Transform Data: Defines the color transformation for different
     * areas of the primary image. */
    IMAGE_ROLE_COLOR_TRANSFORM,

    /* Color Index: Stored as an image of height == 1. */
    IMAGE_ROLE_COLOR_INDEXING,

    IMAGE_ROLE_NB,
};
172 
/* State for decoding one canonical Huffman code: either a full VLC table,
 * or "simple" mode for codes with only one or two symbols. */
typedef struct HuffReader {
    VLC vlc;                    /* Huffman decoder context */
    int simple;                 /* whether to use simple (1- or 2-symbol) mode */
    int nb_symbols;             /* number of coded symbols */
    uint16_t simple_symbols[2]; /* symbols for simple mode */
} HuffReader;
179 
180 typedef struct ImageContext {
181  enum ImageRole role; /* role of this image */
182  AVFrame *frame; /* AVFrame for data */
183  int color_cache_bits; /* color cache size, log2 */
184  uint32_t *color_cache; /* color cache data */
185  int nb_huffman_groups; /* number of huffman groups */
186  HuffReader *huffman_groups; /* reader for each huffman group */
187  /* relative size compared to primary image, log2.
188  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
189  * number of pixels per byte in the primary image (pixel packing) */
192 } ImageContext;
193 
194 typedef struct WebPContext {
195  VP8Context v; /* VP8 Context used for lossy decoding */
196  GetBitContext gb; /* bitstream reader for main image chunk */
197  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
198  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
199  AVCodecContext *avctx; /* parent AVCodecContext */
200  int initialized; /* set once the VP8 context is initialized */
201  int has_alpha; /* has a separate alpha chunk */
202  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
203  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
204  const uint8_t *alpha_data; /* alpha chunk data */
205  int alpha_data_size; /* alpha chunk data size */
206  int has_exif; /* set after an EXIF chunk has been processed */
207  int has_iccp; /* set after an ICCP chunk has been processed */
208  int width; /* image width */
209  int height; /* image height */
210 
211  int nb_transforms; /* number of transforms */
212  enum TransformType transforms[4]; /* transformations used in the image, in order */
213  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
214  * before pixels are unpacked, or same as width otherwise. */
216  int nb_huffman_groups; /* number of huffman groups in the primary image */
217  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
218 } WebPContext;
219 
/* Address of the 4-byte ARGB pixel at (x, y); all macro arguments are
 * parenthesized in the expansion to keep evaluation order predictable. */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* lvalue of component c (0=A, 1=R, 2=G, 3=B) of the pixel at (x, y) */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
225 
227 {
228  int i, j;
229 
230  av_free(img->color_cache);
231  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
232  av_frame_free(&img->frame);
233  if (img->huffman_groups) {
234  for (i = 0; i < img->nb_huffman_groups; i++) {
235  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
236  ff_vlc_free(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
237  }
238  av_free(img->huffman_groups);
239  }
240  memset(img, 0, sizeof(*img));
241 }
242 
244 {
245  if (r->simple) {
246  if (r->nb_symbols == 1)
247  return r->simple_symbols[0];
248  else
249  return r->simple_symbols[get_bits1(gb)];
250  } else
251  return get_vlc2(gb, r->vlc.table, 8, 2);
252 }
253 
/**
 * Initialize a HuffReader from a table of per-symbol canonical code lengths.
 *
 * @param r             reader to initialize
 * @param code_lengths  code length for each symbol of the alphabet;
 *                      0 means the symbol is not coded
 * @param len_counts    on input, the number of symbols per code length;
 *                      destroyed during the call (turned into cumulative
 *                      offsets for the counting sort)
 * @param lens          output: lengths in canonical symbol order.
 *                      NOTE(review): the caller may pass a pointer into the
 *                      tail of code_lengths here, so entries must only be
 *                      written at indices already consumed from code_lengths
 *                      — the write order below preserves that.
 * @param syms          output: symbols in canonical order
 * @param alphabet_size number of entries in code_lengths
 * @param logctx        context for av_log
 * @return 0 on success, a negative AVERROR code on failure
 */
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
                                       uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1],
                                       uint8_t lens[], uint16_t syms[],
                                       int alphabet_size, void *logctx)
{
    unsigned nb_codes = 0;
    int ret;

    // Count the number of symbols of each length and transform len_counts
    // into an array of offsets.
    for (int len = 1; len <= MAX_HUFFMAN_CODE_LENGTH; ++len) {
        unsigned cnt = len_counts[len];
        len_counts[len] = nb_codes;
        nb_codes += cnt;
    }

    // Counting sort: scatter each coded symbol (and its length) to its
    // canonical position, bucketed by code length.
    for (int sym = 0; sym < alphabet_size; ++sym) {
        if (code_lengths[sym]) {
            unsigned idx = len_counts[code_lengths[sym]]++;
            syms[idx] = sym;
            lens[idx] = code_lengths[sym];
        }
    }

    if (nb_codes == 0) {
        // No symbols
        return AVERROR_INVALIDDATA;
    }
    if (nb_codes == 1) {
        // Special-case 1 symbol since the VLC reader cannot handle it
        r->nb_symbols = 1;
        r->simple = 1;
        r->simple_symbols[0] = syms[0];
        return 0;
    }

    ret = ff_vlc_init_from_lengths(&r->vlc, 8, nb_codes, lens, 1,
                                   syms, 2, 2, 0, VLC_INIT_OUTPUT_LE, logctx);
    if (ret < 0)
        return ret;
    r->simple = 0;

    return 0;
}
298 
300 {
301  hc->nb_symbols = get_bits1(&s->gb) + 1;
302 
303  if (get_bits1(&s->gb))
304  hc->simple_symbols[0] = get_bits(&s->gb, 8);
305  else
306  hc->simple_symbols[0] = get_bits1(&s->gb);
307 
308  if (hc->nb_symbols == 2)
309  hc->simple_symbols[1] = get_bits(&s->gb, 8);
310 
311  hc->simple = 1;
312 }
313 
315  int alphabet_size)
316 {
317  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
318  uint8_t *code_lengths;
319  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
320  uint8_t reordered_code_length_code_lengths[NUM_CODE_LENGTH_CODES];
321  uint16_t reordered_code_length_syms[NUM_CODE_LENGTH_CODES];
322  uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1] = { 0 };
323  int symbol, max_symbol, prev_code_len, ret;
324  int num_codes = 4 + get_bits(&s->gb, 4);
325 
326  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
327 
328  for (int i = 0; i < num_codes; i++) {
329  unsigned len = get_bits(&s->gb, 3);
330  code_length_code_lengths[code_length_code_order[i]] = len;
331  len_counts[len]++;
332  }
333 
334  if (get_bits1(&s->gb)) {
335  int bits = 2 + 2 * get_bits(&s->gb, 3);
336  max_symbol = 2 + get_bits(&s->gb, bits);
337  if (max_symbol > alphabet_size) {
338  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
339  max_symbol, alphabet_size);
340  return AVERROR_INVALIDDATA;
341  }
342  } else {
343  max_symbol = alphabet_size;
344  }
345 
346  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths, len_counts,
347  reordered_code_length_code_lengths,
348  reordered_code_length_syms,
349  NUM_CODE_LENGTH_CODES, s->avctx);
350  if (ret < 0)
351  return ret;
352 
353  code_lengths = av_malloc_array(alphabet_size, 2 * sizeof(uint8_t) + sizeof(uint16_t));
354  if (!code_lengths) {
355  ret = AVERROR(ENOMEM);
356  goto finish;
357  }
358 
359  prev_code_len = 8;
360  symbol = 0;
361  memset(len_counts, 0, sizeof(len_counts));
362  while (symbol < alphabet_size) {
363  int code_len;
364 
365  if (!max_symbol--)
366  break;
367  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
368  if (code_len < 16U) {
369  /* Code length code [0..15] indicates literal code lengths. */
370  code_lengths[symbol++] = code_len;
371  len_counts[code_len]++;
372  if (code_len)
373  prev_code_len = code_len;
374  } else {
375  int repeat = 0, length = 0;
376  switch (code_len) {
377  default:
379  goto finish;
380  case 16:
381  /* Code 16 repeats the previous non-zero value [3..6] times,
382  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
383  * non-zero value has been emitted, a value of 8 is repeated. */
384  repeat = 3 + get_bits(&s->gb, 2);
385  length = prev_code_len;
386  len_counts[length] += repeat;
387  break;
388  case 17:
389  /* Code 17 emits a streak of zeros [3..10], i.e.,
390  * 3 + ReadBits(3) times. */
391  repeat = 3 + get_bits(&s->gb, 3);
392  break;
393  case 18:
394  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
395  * 11 + ReadBits(7) times. */
396  repeat = 11 + get_bits(&s->gb, 7);
397  break;
398  }
399  if (symbol + repeat > alphabet_size) {
400  av_log(s->avctx, AV_LOG_ERROR,
401  "invalid symbol %d + repeat %d > alphabet size %d\n",
402  symbol, repeat, alphabet_size);
404  goto finish;
405  }
406  while (repeat-- > 0)
407  code_lengths[symbol++] = length;
408  }
409  }
410 
411  ret = huff_reader_build_canonical(hc, code_lengths, len_counts,
412  code_lengths + symbol,
413  (uint16_t*)(code_lengths + 2 * symbol),
414  symbol, s->avctx);
415 
416 finish:
417  ff_vlc_free(&code_len_hc.vlc);
418  av_free(code_lengths);
419  return ret;
420 }
421 
422 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
423  int w, int h);
424 
/* Read the 3-bit block-size code for a transform sub-image and compute its
 * dimensions: blocks are 2^(code+2) pixels square, counts rounded up.
 * Expects locals `block_bits`, `blocks_w`, `blocks_h` and a WebPContext `s`
 * in the expanding scope. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
430 
432 {
433  ImageContext *img;
434  int ret, block_bits, blocks_w, blocks_h, x, y, max;
435 
436  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
437 
438  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
439  if (ret < 0)
440  return ret;
441 
442  img = &s->image[IMAGE_ROLE_ENTROPY];
443  img->size_reduction = block_bits;
444 
445  /* the number of huffman groups is determined by the maximum group number
446  * coded in the entropy image */
447  max = 0;
448  for (y = 0; y < img->frame->height; y++) {
449  for (x = 0; x < img->frame->width; x++) {
450  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
451  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
452  int p = p0 << 8 | p1;
453  max = FFMAX(max, p);
454  }
455  }
456  s->nb_huffman_groups = max + 1;
457 
458  return 0;
459 }
460 
462 {
463  int block_bits, blocks_w, blocks_h, ret;
464 
465  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
466 
468  blocks_h);
469  if (ret < 0)
470  return ret;
471 
472  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
473 
474  return 0;
475 }
476 
478 {
479  int block_bits, blocks_w, blocks_h, ret;
480 
481  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
482 
484  blocks_h);
485  if (ret < 0)
486  return ret;
487 
488  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
489 
490  return 0;
491 }
492 
494 {
495  ImageContext *img;
496  int width_bits, index_size, ret, x;
497  uint8_t *ct;
498 
499  index_size = get_bits(&s->gb, 8) + 1;
500 
501  if (index_size <= 2)
502  width_bits = 3;
503  else if (index_size <= 4)
504  width_bits = 2;
505  else if (index_size <= 16)
506  width_bits = 1;
507  else
508  width_bits = 0;
509 
511  index_size, 1);
512  if (ret < 0)
513  return ret;
514 
515  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
516  img->size_reduction = width_bits;
517  if (width_bits > 0)
518  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
519 
520  /* color index values are delta-coded */
521  ct = img->frame->data[0] + 4;
522  for (x = 4; x < img->frame->width * 4; x++, ct++)
523  ct[0] += ct[-4];
524 
525  return 0;
526 }
527 
529  int x, int y)
530 {
531  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
532  int group = 0;
533 
534  if (gimg->size_reduction > 0) {
535  int group_x = x >> gimg->size_reduction;
536  int group_y = y >> gimg->size_reduction;
537  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
538  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
539  group = g0 << 8 | g1;
540  }
541 
542  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
543 }
544 
546 {
547  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
548  img->color_cache[cache_idx] = c;
549 }
550 
552  int w, int h)
553 {
554  ImageContext *img;
555  HuffReader *hg;
556  int i, j, ret, x, y, width;
557 
558  img = &s->image[role];
559  img->role = role;
560 
561  if (!img->frame) {
562  img->frame = av_frame_alloc();
563  if (!img->frame)
564  return AVERROR(ENOMEM);
565  }
566 
567  img->frame->format = AV_PIX_FMT_ARGB;
568  img->frame->width = w;
569  img->frame->height = h;
570 
571  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
572  ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
573  } else
574  ret = av_frame_get_buffer(img->frame, 1);
575  if (ret < 0)
576  return ret;
577 
578  if (get_bits1(&s->gb)) {
579  img->color_cache_bits = get_bits(&s->gb, 4);
580  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
581  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
582  img->color_cache_bits);
583  return AVERROR_INVALIDDATA;
584  }
585  img->color_cache = av_calloc(1 << img->color_cache_bits,
586  sizeof(*img->color_cache));
587  if (!img->color_cache)
588  return AVERROR(ENOMEM);
589  } else {
590  img->color_cache_bits = 0;
591  }
592 
593  img->nb_huffman_groups = 1;
594  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
596  if (ret < 0)
597  return ret;
598  img->nb_huffman_groups = s->nb_huffman_groups;
599  }
600  img->huffman_groups = av_calloc(img->nb_huffman_groups,
602  sizeof(*img->huffman_groups));
603  if (!img->huffman_groups)
604  return AVERROR(ENOMEM);
605 
606  for (i = 0; i < img->nb_huffman_groups; i++) {
607  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
608  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
609  int alphabet_size = alphabet_sizes[j];
610  if (!j && img->color_cache_bits > 0)
611  alphabet_size += 1 << img->color_cache_bits;
612 
613  if (get_bits1(&s->gb)) {
614  read_huffman_code_simple(s, &hg[j]);
615  } else {
616  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
617  if (ret < 0)
618  return ret;
619  }
620  }
621  }
622 
623  width = img->frame->width;
624  if (role == IMAGE_ROLE_ARGB)
625  width = s->reduced_width;
626 
627  x = 0; y = 0;
628  while (y < img->frame->height) {
629  int v;
630 
631  if (get_bits_left(&s->gb) < 0)
632  return AVERROR_INVALIDDATA;
633 
634  hg = get_huffman_group(s, img, x, y);
635  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
636  if (v < NUM_LITERAL_CODES) {
637  /* literal pixel values */
638  uint8_t *p = GET_PIXEL(img->frame, x, y);
639  p[2] = v;
640  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
641  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
642  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
643  if (img->color_cache_bits)
645  x++;
646  if (x == width) {
647  x = 0;
648  y++;
649  }
650  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
651  /* LZ77 backwards mapping */
652  int prefix_code, length, distance, ref_x, ref_y;
653 
654  /* parse length and distance */
655  prefix_code = v - NUM_LITERAL_CODES;
656  if (prefix_code < 4) {
657  length = prefix_code + 1;
658  } else {
659  int extra_bits = (prefix_code - 2) >> 1;
660  int offset = 2 + (prefix_code & 1) << extra_bits;
661  length = offset + get_bits(&s->gb, extra_bits) + 1;
662  }
663  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
664  if (prefix_code > 39U) {
665  av_log(s->avctx, AV_LOG_ERROR,
666  "distance prefix code too large: %d\n", prefix_code);
667  return AVERROR_INVALIDDATA;
668  }
669  if (prefix_code < 4) {
670  distance = prefix_code + 1;
671  } else {
672  int extra_bits = prefix_code - 2 >> 1;
673  int offset = 2 + (prefix_code & 1) << extra_bits;
674  distance = offset + get_bits(&s->gb, extra_bits) + 1;
675  }
676 
677  /* find reference location */
678  if (distance <= NUM_SHORT_DISTANCES) {
679  int xi = lz77_distance_offsets[distance - 1][0];
680  int yi = lz77_distance_offsets[distance - 1][1];
681  distance = FFMAX(1, xi + yi * width);
682  } else {
684  }
685  ref_x = x;
686  ref_y = y;
687  if (distance <= x) {
688  ref_x -= distance;
689  distance = 0;
690  } else {
691  ref_x = 0;
692  distance -= x;
693  }
694  while (distance >= width) {
695  ref_y--;
696  distance -= width;
697  }
698  if (distance > 0) {
699  ref_x = width - distance;
700  ref_y--;
701  }
702  ref_x = FFMAX(0, ref_x);
703  ref_y = FFMAX(0, ref_y);
704 
705  if (ref_y == y && ref_x >= x)
706  return AVERROR_INVALIDDATA;
707 
708  /* copy pixels
709  * source and dest regions can overlap and wrap lines, so just
710  * copy per-pixel */
711  for (i = 0; i < length; i++) {
712  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
713  uint8_t *p = GET_PIXEL(img->frame, x, y);
714 
715  AV_COPY32(p, p_ref);
716  if (img->color_cache_bits)
718  x++;
719  ref_x++;
720  if (x == width) {
721  x = 0;
722  y++;
723  }
724  if (ref_x == width) {
725  ref_x = 0;
726  ref_y++;
727  }
728  if (y == img->frame->height || ref_y == img->frame->height)
729  break;
730  }
731  } else {
732  /* read from color cache */
733  uint8_t *p = GET_PIXEL(img->frame, x, y);
734  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
735 
736  if (!img->color_cache_bits) {
737  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
738  return AVERROR_INVALIDDATA;
739  }
740  if (cache_idx >= 1 << img->color_cache_bits) {
741  av_log(s->avctx, AV_LOG_ERROR,
742  "color cache index out-of-bounds\n");
743  return AVERROR_INVALIDDATA;
744  }
745  AV_WB32(p, img->color_cache[cache_idx]);
746  x++;
747  if (x == width) {
748  x = 0;
749  y++;
750  }
751  }
752  }
753 
754  return 0;
755 }
756 
/* PRED_MODE_BLACK: predict opaque black, i.e. the big-endian bytes of 0xFF000000 */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = 0xFF;
    p[1] = 0;
    p[2] = 0;
    p[3] = 0;
}
763 
/* PRED_MODE_L: predict from the left neighbor */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_l[i];
}
770 
/* PRED_MODE_T: predict from the top neighbor */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_t[i];
}
777 
/* PRED_MODE_TR: predict from the top-right neighbor */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_tr[i];
}
784 
/* PRED_MODE_TL: predict from the top-left neighbor */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_tl[i];
}
791 
/* PRED_MODE_AVG_T_AVG_L_TR: average of top and avg(left, top-right), per channel */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int avg_l_tr = (p_l[i] + p_tr[i]) >> 1;
        p[i] = (p_t[i] + avg_l_tr) >> 1;
    }
}
801 
/* PRED_MODE_AVG_L_TL: average of left and top-left, per channel */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_tl[i]) >> 1;
}
811 
/* PRED_MODE_AVG_L_T: average of left and top, per channel */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_t[i]) >> 1;
}
821 
/* PRED_MODE_AVG_TL_T: average of top-left and top, per channel */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_tl[i] + p_t[i]) >> 1;
}
831 
/* PRED_MODE_AVG_T_TR: average of top and top-right, per channel */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_t[i] + p_tr[i]) >> 1;
}
841 
/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR: average of avg(left, top-left) and
 * avg(top, top-right), per channel */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int a = (p_l[i] + p_tl[i]) >> 1;
        int b = (p_t[i] + p_tr[i]) >> 1;
        p[i] = (a + b) >> 1;
    }
}
851 
/* PRED_MODE_SELECT: copy either the top or the left neighbor, whichever
 * agrees better with the top-left pixel across all four channels */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    const uint8_t *src;
    int bias = 0;

    for (int i = 0; i < 4; i++) {
        int dl = p_l[i] - p_tl[i];
        int dt = p_t[i] - p_tl[i];
        bias += (dl < 0 ? -dl : dl) - (dt < 0 ? -dt : dt);
    }

    src = (bias <= 0) ? p_t : p_l;
    for (int i = 0; i < 4; i++)
        p[i] = src[i];
}
865 
/* PRED_MODE_ADD_SUBTRACT_FULL: gradient predictor left + top - top-left,
 * clamped to [0, 255] per channel */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int v = p_l[i] + p_t[i] - p_tl[i];
        p[i] = v < 0 ? 0 : (v > 255 ? 255 : v);
    }
}
875 
876 static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
877 {
878  int d = a + b >> 1;
879  return av_clip_uint8(d + (d - c) / 2);
880 }
881 
/* PRED_MODE_ADD_SUBTRACT_HALF: clamped half-gradient predictor, per channel */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = clamp_add_subtract_half(p_l[i], p_t[i], p_tl[i]);
}
891 
892 typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
893  const uint8_t *p_tl, const uint8_t *p_t,
894  const uint8_t *p_tr);
895 
896 static const inv_predict_func inverse_predict[14] = {
901 };
902 
903 static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
904 {
905  uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
906  uint8_t p[4];
907 
908  dec = GET_PIXEL(frame, x, y);
909  p_l = GET_PIXEL(frame, x - 1, y);
910  p_tl = GET_PIXEL(frame, x - 1, y - 1);
911  p_t = GET_PIXEL(frame, x, y - 1);
912  if (x == frame->width - 1)
913  p_tr = GET_PIXEL(frame, 0, y);
914  else
915  p_tr = GET_PIXEL(frame, x + 1, y - 1);
916 
917  inverse_predict[m](p, p_l, p_tl, p_t, p_tr);
918 
919  dec[0] += p[0];
920  dec[1] += p[1];
921  dec[2] += p[2];
922  dec[3] += p[3];
923 }
924 
926 {
927  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
928  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
929  int x, y;
930 
931  for (y = 0; y < img->frame->height; y++) {
932  for (x = 0; x < s->reduced_width; x++) {
933  int tx = x >> pimg->size_reduction;
934  int ty = y >> pimg->size_reduction;
935  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
936 
937  if (x == 0) {
938  if (y == 0)
939  m = PRED_MODE_BLACK;
940  else
941  m = PRED_MODE_T;
942  } else if (y == 0)
943  m = PRED_MODE_L;
944 
945  if (m > 13) {
946  av_log(s->avctx, AV_LOG_ERROR,
947  "invalid predictor mode: %d\n", m);
948  return AVERROR_INVALIDDATA;
949  }
950  inverse_prediction(img->frame, m, x, y);
951  }
952  }
953  return 0;
954 }
955 
/* Apply one color-transform coefficient: both the coefficient and the sample
 * are reinterpreted as signed 8-bit values, multiplied, and the product is
 * scaled down by 2^5 (WebP lossless "color transform" element). */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
961 
963 {
964  ImageContext *img, *cimg;
965  int x, y, cx, cy;
966  uint8_t *p, *cp;
967 
968  img = &s->image[IMAGE_ROLE_ARGB];
969  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
970 
971  for (y = 0; y < img->frame->height; y++) {
972  for (x = 0; x < s->reduced_width; x++) {
973  cx = x >> cimg->size_reduction;
974  cy = y >> cimg->size_reduction;
975  cp = GET_PIXEL(cimg->frame, cx, cy);
976  p = GET_PIXEL(img->frame, x, y);
977 
978  p[1] += color_transform_delta(cp[3], p[2]);
979  p[3] += color_transform_delta(cp[2], p[2]) +
980  color_transform_delta(cp[1], p[1]);
981  }
982  }
983  return 0;
984 }
985 
987 {
988  int x, y;
989  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
990 
991  for (y = 0; y < img->frame->height; y++) {
992  for (x = 0; x < s->reduced_width; x++) {
993  uint8_t *p = GET_PIXEL(img->frame, x, y);
994  p[1] += p[2];
995  p[3] += p[2];
996  }
997  }
998  return 0;
999 }
1000 
1002 {
1003  ImageContext *img;
1004  ImageContext *pal;
1005  int i, x, y;
1006  uint8_t *p;
1007 
1008  img = &s->image[IMAGE_ROLE_ARGB];
1009  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1010 
1011  if (pal->size_reduction > 0) { // undo pixel packing
1012  GetBitContext gb_g;
1013  uint8_t *line;
1014  int pixel_bits = 8 >> pal->size_reduction;
1015 
1016  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1017  if (!line)
1018  return AVERROR(ENOMEM);
1019 
1020  for (y = 0; y < img->frame->height; y++) {
1021  p = GET_PIXEL(img->frame, 0, y);
1022  memcpy(line, p, img->frame->linesize[0]);
1023  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1024  skip_bits(&gb_g, 16);
1025  i = 0;
1026  for (x = 0; x < img->frame->width; x++) {
1027  p = GET_PIXEL(img->frame, x, y);
1028  p[2] = get_bits(&gb_g, pixel_bits);
1029  i++;
1030  if (i == 1 << pal->size_reduction) {
1031  skip_bits(&gb_g, 24);
1032  i = 0;
1033  }
1034  }
1035  }
1036  av_free(line);
1037  s->reduced_width = s->width; // we are back to full size
1038  }
1039 
1040  // switch to local palette if it's worth initializing it
1041  if (img->frame->height * img->frame->width > 300) {
1042  uint8_t palette[256 * 4];
1043  const int size = pal->frame->width * 4;
1044  av_assert0(size <= 1024U);
1045  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1046  // set extra entries to transparent black
1047  memset(palette + size, 0, 256 * 4 - size);
1048  for (y = 0; y < img->frame->height; y++) {
1049  for (x = 0; x < img->frame->width; x++) {
1050  p = GET_PIXEL(img->frame, x, y);
1051  i = p[2];
1052  AV_COPY32(p, &palette[i * 4]);
1053  }
1054  }
1055  } else {
1056  for (y = 0; y < img->frame->height; y++) {
1057  for (x = 0; x < img->frame->width; x++) {
1058  p = GET_PIXEL(img->frame, x, y);
1059  i = p[2];
1060  if (i >= pal->frame->width) {
1061  AV_WB32(p, 0x00000000);
1062  } else {
1063  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1064  AV_COPY32(p, pi);
1065  }
1066  }
1067  }
1068  }
1069 
1070  return 0;
1071 }
1072 
1073 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1074 {
1075  WebPContext *s = avctx->priv_data;
1076  if (s->width && s->width != w) {
1077  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1078  s->width, w);
1079  }
1080  s->width = w;
1081  if (s->height && s->height != h) {
1082  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1083  s->height, h);
1084  }
1085  s->height = h;
1086 }
1087 
1089  int *got_frame, const uint8_t *data_start,
1090  unsigned int data_size, int is_alpha_chunk)
1091 {
1092  WebPContext *s = avctx->priv_data;
1093  int w, h, ret, i, used;
1094 
1095  if (!is_alpha_chunk)
1096  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1097 
1098  ret = init_get_bits8(&s->gb, data_start, data_size);
1099  if (ret < 0)
1100  return ret;
1101 
1102  if (!is_alpha_chunk) {
1103  if (get_bits(&s->gb, 8) != 0x2F) {
1104  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1105  return AVERROR_INVALIDDATA;
1106  }
1107 
1108  w = get_bits(&s->gb, 14) + 1;
1109  h = get_bits(&s->gb, 14) + 1;
1110 
1111  update_canvas_size(avctx, w, h);
1112 
1113  ret = ff_set_dimensions(avctx, s->width, s->height);
1114  if (ret < 0)
1115  return ret;
1116 
1117  s->has_alpha = get_bits1(&s->gb);
1118 
1119  if (get_bits(&s->gb, 3) != 0x0) {
1120  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1121  return AVERROR_INVALIDDATA;
1122  }
1123  } else {
1124  if (!s->width || !s->height)
1125  return AVERROR_BUG;
1126  w = s->width;
1127  h = s->height;
1128  }
1129 
1130  /* parse transformations */
1131  s->nb_transforms = 0;
1132  s->reduced_width = s->width;
1133  used = 0;
1134  while (get_bits1(&s->gb)) {
1135  enum TransformType transform = get_bits(&s->gb, 2);
1136  if (used & (1 << transform)) {
1137  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1138  transform);
1140  goto free_and_return;
1141  }
1142  used |= (1 << transform);
1143  s->transforms[s->nb_transforms++] = transform;
1144  switch (transform) {
1145  case PREDICTOR_TRANSFORM:
1147  break;
1148  case COLOR_TRANSFORM:
1150  break;
1153  break;
1154  }
1155  if (ret < 0)
1156  goto free_and_return;
1157  }
1158 
1159  /* decode primary image */
1160  s->image[IMAGE_ROLE_ARGB].frame = p;
1161  if (is_alpha_chunk)
1162  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1164  if (ret < 0)
1165  goto free_and_return;
1166 
1167  /* apply transformations */
1168  for (i = s->nb_transforms - 1; i >= 0; i--) {
1169  switch (s->transforms[i]) {
1170  case PREDICTOR_TRANSFORM:
1172  break;
1173  case COLOR_TRANSFORM:
1175  break;
1176  case SUBTRACT_GREEN:
1178  break;
1181  break;
1182  }
1183  if (ret < 0)
1184  goto free_and_return;
1185  }
1186 
1187  *got_frame = 1;
1188  p->pict_type = AV_PICTURE_TYPE_I;
1189  p->flags |= AV_FRAME_FLAG_KEY;
1190  p->flags |= AV_FRAME_FLAG_LOSSLESS;
1191  ret = data_size;
1192 
1193 free_and_return:
1194  for (i = 0; i < IMAGE_ROLE_NB; i++)
1195  image_ctx_free(&s->image[i]);
1196 
1197  return ret;
1198 }
1199 
1201 {
1202  int x, y, ls;
1203  uint8_t *dec;
1204 
1205  ls = frame->linesize[3];
1206 
1207  /* filter first row using horizontal filter */
1208  dec = frame->data[3] + 1;
1209  for (x = 1; x < frame->width; x++, dec++)
1210  *dec += *(dec - 1);
1211 
1212  /* filter first column using vertical filter */
1213  dec = frame->data[3] + ls;
1214  for (y = 1; y < frame->height; y++, dec += ls)
1215  *dec += *(dec - ls);
1216 
1217  /* filter the rest using the specified filter */
1218  switch (m) {
1220  for (y = 1; y < frame->height; y++) {
1221  dec = frame->data[3] + y * ls + 1;
1222  for (x = 1; x < frame->width; x++, dec++)
1223  *dec += *(dec - 1);
1224  }
1225  break;
1226  case ALPHA_FILTER_VERTICAL:
1227  for (y = 1; y < frame->height; y++) {
1228  dec = frame->data[3] + y * ls + 1;
1229  for (x = 1; x < frame->width; x++, dec++)
1230  *dec += *(dec - ls);
1231  }
1232  break;
1233  case ALPHA_FILTER_GRADIENT:
1234  for (y = 1; y < frame->height; y++) {
1235  dec = frame->data[3] + y * ls + 1;
1236  for (x = 1; x < frame->width; x++, dec++)
1237  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1238  }
1239  break;
1240  }
1241 }
1242 
1244  const uint8_t *data_start,
1245  unsigned int data_size)
1246 {
1247  WebPContext *s = avctx->priv_data;
1248  int x, y, ret;
1249 
1250  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1251  GetByteContext gb;
1252 
1253  bytestream2_init(&gb, data_start, data_size);
1254  for (y = 0; y < s->height; y++)
1255  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1256  s->width);
1257  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1258  uint8_t *ap, *pp;
1259  int alpha_got_frame = 0;
1260 
1261  s->alpha_frame = av_frame_alloc();
1262  if (!s->alpha_frame)
1263  return AVERROR(ENOMEM);
1264 
1265  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1266  data_start, data_size, 1);
1267  if (ret < 0) {
1268  av_frame_free(&s->alpha_frame);
1269  return ret;
1270  }
1271  if (!alpha_got_frame) {
1272  av_frame_free(&s->alpha_frame);
1273  return AVERROR_INVALIDDATA;
1274  }
1275 
1276  /* copy green component of alpha image to alpha plane of primary image */
1277  for (y = 0; y < s->height; y++) {
1278  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1279  pp = p->data[3] + p->linesize[3] * y;
1280  for (x = 0; x < s->width; x++) {
1281  *pp = *ap;
1282  pp++;
1283  ap += 4;
1284  }
1285  }
1286  av_frame_free(&s->alpha_frame);
1287  }
1288 
1289  /* apply alpha filtering */
1290  if (s->alpha_filter)
1291  alpha_inverse_prediction(p, s->alpha_filter);
1292 
1293  return 0;
1294 }
1295 
1297  int *got_frame, uint8_t *data_start,
1298  unsigned int data_size)
1299 {
1300  WebPContext *s = avctx->priv_data;
1301  int ret;
1302 
1303  if (!s->initialized) {
1304  ff_vp8_decode_init(avctx);
1305  s->initialized = 1;
1306  s->v.actually_webp = 1;
1307  }
1308  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1309 
1310  if (data_size > INT_MAX) {
1311  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1312  return AVERROR_PATCHWELCOME;
1313  }
1314 
1315  av_packet_unref(s->pkt);
1316  s->pkt->data = data_start;
1317  s->pkt->size = data_size;
1318 
1319  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1320  if (ret < 0)
1321  return ret;
1322 
1323  if (!*got_frame)
1324  return AVERROR_INVALIDDATA;
1325 
1326  update_canvas_size(avctx, avctx->width, avctx->height);
1327 
1328  if (s->has_alpha) {
1329  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1330  s->alpha_data_size);
1331  if (ret < 0)
1332  return ret;
1333  }
1334  return ret;
1335 }
1336 
1338  int *got_frame, AVPacket *avpkt)
1339 {
1340  WebPContext *s = avctx->priv_data;
1341  GetByteContext gb;
1342  int ret;
1343  uint32_t chunk_type, chunk_size;
1344  int vp8x_flags = 0;
1345 
1346  s->avctx = avctx;
1347  s->width = 0;
1348  s->height = 0;
1349  *got_frame = 0;
1350  s->has_alpha = 0;
1351  s->has_exif = 0;
1352  s->has_iccp = 0;
1353  bytestream2_init(&gb, avpkt->data, avpkt->size);
1354 
1355  if (bytestream2_get_bytes_left(&gb) < 12)
1356  return AVERROR_INVALIDDATA;
1357 
1358  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1359  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1360  return AVERROR_INVALIDDATA;
1361  }
1362 
1363  chunk_size = bytestream2_get_le32(&gb);
1364  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1365  return AVERROR_INVALIDDATA;
1366 
1367  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1368  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1369  return AVERROR_INVALIDDATA;
1370  }
1371 
1372  while (bytestream2_get_bytes_left(&gb) > 8) {
1373  chunk_type = bytestream2_get_le32(&gb);
1374  chunk_size = bytestream2_get_le32(&gb);
1375  if (chunk_size == UINT32_MAX)
1376  return AVERROR_INVALIDDATA;
1377  chunk_size += chunk_size & 1;
1378 
1379  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1380  /* we seem to be running out of data, but it could also be that the
1381  bitstream has trailing junk leading to bogus chunk_size. */
1382  break;
1383  }
1384 
1385  switch (chunk_type) {
1386  case MKTAG('V', 'P', '8', ' '):
1387  if (!*got_frame) {
1388  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1389  avpkt->data + bytestream2_tell(&gb),
1390  chunk_size);
1391  if (ret < 0)
1392  return ret;
1393  }
1394  bytestream2_skip(&gb, chunk_size);
1395  break;
1396  case MKTAG('V', 'P', '8', 'L'):
1397  if (!*got_frame) {
1398  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1399  avpkt->data + bytestream2_tell(&gb),
1400  chunk_size, 0);
1401  if (ret < 0)
1402  return ret;
1403 #if FF_API_CODEC_PROPS
1407 #endif
1408  }
1409  bytestream2_skip(&gb, chunk_size);
1410  break;
1411  case MKTAG('V', 'P', '8', 'X'):
1412  if (s->width || s->height || *got_frame) {
1413  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1414  return AVERROR_INVALIDDATA;
1415  }
1416  vp8x_flags = bytestream2_get_byte(&gb);
1417  bytestream2_skip(&gb, 3);
1418  s->width = bytestream2_get_le24(&gb) + 1;
1419  s->height = bytestream2_get_le24(&gb) + 1;
1420  ret = av_image_check_size(s->width, s->height, 0, avctx);
1421  if (ret < 0)
1422  return ret;
1423  break;
1424  case MKTAG('A', 'L', 'P', 'H'): {
1425  int alpha_header, filter_m, compression;
1426 
1427  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1428  av_log(avctx, AV_LOG_WARNING,
1429  "ALPHA chunk present, but alpha bit not set in the "
1430  "VP8X header\n");
1431  }
1432  if (chunk_size == 0) {
1433  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1434  return AVERROR_INVALIDDATA;
1435  }
1436  alpha_header = bytestream2_get_byte(&gb);
1437  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1438  s->alpha_data_size = chunk_size - 1;
1439  bytestream2_skip(&gb, s->alpha_data_size);
1440 
1441  filter_m = (alpha_header >> 2) & 0x03;
1442  compression = alpha_header & 0x03;
1443 
1444  if (compression > ALPHA_COMPRESSION_VP8L) {
1445  av_log(avctx, AV_LOG_VERBOSE,
1446  "skipping unsupported ALPHA chunk\n");
1447  } else {
1448  s->has_alpha = 1;
1449  s->alpha_compression = compression;
1450  s->alpha_filter = filter_m;
1451  }
1452 
1453  break;
1454  }
1455  case MKTAG('E', 'X', 'I', 'F'): {
1456  AVBufferRef *exif_buf = NULL;
1457 
1458  if (s->has_exif) {
1459  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1460  goto exif_end;
1461  }
1462 
1463  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1464  av_log(avctx, AV_LOG_WARNING,
1465  "EXIF chunk present, but Exif bit not set in the "
1466  "VP8X header\n");
1467 
1468  exif_buf = av_buffer_alloc(chunk_size);
1469  if (!exif_buf) {
1470  av_log(avctx, AV_LOG_WARNING, "unable to allocate EXIF buffer\n");
1471  goto exif_end;
1472  }
1473  s->has_exif = 1;
1474  memcpy(exif_buf->data, gb.buffer, chunk_size);
1475 
1476  ret = ff_decode_exif_attach_buffer(avctx, p, &exif_buf, AV_EXIF_TIFF_HEADER);
1477  if (ret < 0)
1478  av_log(avctx, AV_LOG_WARNING, "unable to attach EXIF buffer\n");
1479 
1480 exif_end:
1481  bytestream2_skip(&gb, chunk_size);
1482  break;
1483  }
1484  case MKTAG('I', 'C', 'C', 'P'): {
1485  AVFrameSideData *sd;
1486 
1487  if (s->has_iccp) {
1488  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1489  bytestream2_skip(&gb, chunk_size);
1490  break;
1491  }
1492  if (!(vp8x_flags & VP8X_FLAG_ICC))
1493  av_log(avctx, AV_LOG_WARNING,
1494  "ICCP chunk present, but ICC Profile bit not set in the "
1495  "VP8X header\n");
1496 
1497  s->has_iccp = 1;
1498 
1499  ret = ff_frame_new_side_data(avctx, p, AV_FRAME_DATA_ICC_PROFILE, chunk_size, &sd);
1500  if (ret < 0)
1501  return ret;
1502 
1503  if (sd) {
1504  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1505  } else {
1506  bytestream2_skip(&gb, chunk_size);
1507  }
1508  break;
1509  }
1510  case MKTAG('A', 'N', 'I', 'M'):
1511  case MKTAG('A', 'N', 'M', 'F'):
1512  case MKTAG('X', 'M', 'P', ' '):
1513  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1514  av_fourcc2str(chunk_type));
1515  bytestream2_skip(&gb, chunk_size);
1516  break;
1517  default:
1518  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1519  av_fourcc2str(chunk_type));
1520  bytestream2_skip(&gb, chunk_size);
1521  break;
1522  }
1523  }
1524 
1525  if (!*got_frame) {
1526  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1527  return AVERROR_INVALIDDATA;
1528  }
1529 
1530  return avpkt->size;
1531 }
1532 
1534 {
1535  WebPContext *s = avctx->priv_data;
1536 
1537  s->pkt = av_packet_alloc();
1538  if (!s->pkt)
1539  return AVERROR(ENOMEM);
1540 
1541  return 0;
1542 }
1543 
1545 {
1546  WebPContext *s = avctx->priv_data;
1547 
1548  av_packet_free(&s->pkt);
1549 
1550  if (s->initialized)
1551  return ff_vp8_decode_free(avctx);
1552 
1553  return 0;
1554 }
1555 
1557  .p.name = "webp",
1558  CODEC_LONG_NAME("WebP image"),
1559  .p.type = AVMEDIA_TYPE_VIDEO,
1560  .p.id = AV_CODEC_ID_WEBP,
1561  .priv_data_size = sizeof(WebPContext),
1564  .close = webp_decode_close,
1565  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1566  .caps_internal = FF_CODEC_CAP_ICC_PROFILES |
1568 };
WebPContext::width
int width
Definition: webp.c:208
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:197
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2814
HuffReader::vlc
VLC vlc
Definition: webp.c:174
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:867
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:70
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1296
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
color
Definition: vf_paletteuse.c:513
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:126
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:108
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:177
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:247
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:104
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:121
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:786
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:772
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
AVFrame::width
int width
Definition: frame.h:507
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:223
AVPacket::data
uint8_t * data
Definition: packet.h:595
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:133
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:117
b
#define b
Definition: input.c:43
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:116
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:185
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:477
FFCodec
Definition: codec_internal.h:127
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:129
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
thread.h
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:212
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:124
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:128
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, const uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1088
HuffReader::simple
int simple
Definition: webp.c:175
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:125
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:202
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:843
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:823
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:199
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:102
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:211
GetBitContext
Definition: get_bits.h:109
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1073
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:205
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:892
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:115
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:59
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:779
ff_webp_decoder
const FFCodec ff_webp_decoder
Definition: webp.c:1556
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:956
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:551
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:138
WebPContext::has_exif
int has_exif
Definition: webp.c:206
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:314
WebPContext::has_alpha
int has_alpha
Definition: webp.c:201
PredictionMode
PredictionMode
Definition: webp.c:120
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:119
ImageContext::frame
AVFrame * frame
Definition: webp.c:182
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1650
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:650
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:903
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
TransformType
TransformType
Definition: webp.c:113
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:130
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1044
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:66
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:79
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:545
bits
uint8_t bits
Definition: vp3data.h:128
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:69
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:853
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:65
ImageContext
Definition: webp.c:180
decode.h
get_bits.h
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:184
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:220
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:191
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:127
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1544
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:186
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
ff_vp8_decode_init
int ff_vp8_decode_init(AVCodecContext *avctx)
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:986
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:176
WebPContext::height
int height
Definition: webp.c:209
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:107
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:876
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:142
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:896
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
tiff_common.h
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:183
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:493
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1533
WebPContext::v
VP8Context v
Definition: webp.c:195
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:73
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:67
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:161
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1200
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:168
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:551
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:758
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:170
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:61
AVPacket::size
int size
Definition: packet.h:596
ff_decode_exif_attach_buffer
int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, enum AVExifHeaderMode header_mode)
Attach the data buffer to the frame.
Definition: decode.c:2484
codec_internal.h
AlphaCompression
AlphaCompression
Definition: webp.c:101
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:114
ImageContext::size_reduction
int size_reduction
Definition: webp.c:190
size
int size
Definition: twinvq_data.h:10344
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2172
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:292
ImageContext::role
enum ImageRole role
Definition: webp.c:181
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:431
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:962
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:60
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:114
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
HuffReader
Definition: webp.c:173
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:461
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:131
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:110
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:216
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:793
WebPContext::reduced_width
int reduced_width
Definition: webp.c:215
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:68
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:198
AlphaFilter
AlphaFilter
Definition: webp.c:106
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:132
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:83
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
WebPContext::gb
GetBitContext gb
Definition: webp.c:196
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:925
av_always_inline
#define av_always_inline
Definition: attributes.h:76
HuffmanIndex
HuffmanIndex
Definition: webp.c:137
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:226
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:813
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:243
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:883
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:217
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:803
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, const uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1243
AVCodecContext
main external API structure.
Definition: avcodec.h:439
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:140
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:157
VLC
Definition: vlc.h:50
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1337
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:226
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
WebPContext::initialized
int initialized
Definition: webp.c:200
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:1001
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
WebPContext::alpha_data
const uint8_t * alpha_data
Definition: webp.c:204
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:290
VLC_INIT_OUTPUT_LE
#define VLC_INIT_OUTPUT_LE
Definition: vlc.h:196
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:71
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:109
w
uint8_t w
Definition: llvidencdsp.c:39
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:425
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:122
WebPContext
Definition: webp.c:194
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:572
VP8Context
Definition: vp8.h:161
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
ImageRole
ImageRole
Definition: webp.c:151
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:231
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1649
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:299
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:141
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH+1], uint8_t lens[], uint16_t syms[], int alphabet_size, void *logctx)
Definition: webp.c:254
h
h
Definition: vp9dsp_template.c:2070
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:207
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:528
width
#define width
Definition: dsp.h:89
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h264.c:190
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:671
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:833
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:103
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:765
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:123
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:203
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:139
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:153
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:134
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:165