/* FFmpeg — libavcodec/webp.c */
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 #include "libavutil/mem.h"
45 
46 #define BITSTREAM_READER_LE
47 #include "avcodec.h"
48 #include "bytestream.h"
49 #include "codec_internal.h"
50 #include "decode.h"
51 #include "exif.h"
52 #include "get_bits.h"
53 #include "thread.h"
54 #include "tiff_common.h"
55 #include "vp8.h"
56 
/* VP8X (extended-format) chunk feature flags */
#define VP8X_FLAG_ANIMATION 0x02
#define VP8X_FLAG_XMP_METADATA 0x04
#define VP8X_FLAG_EXIF_METADATA 0x08
#define VP8X_FLAG_ALPHA 0x10
#define VP8X_FLAG_ICC 0x20

/* VP8L (lossless) bitstream constants */
#define MAX_PALETTE_SIZE 256
#define MAX_CACHE_BITS 11             /* upper bound checked for color_cache_bits */
#define NUM_CODE_LENGTH_CODES 19
#define HUFFMAN_CODES_PER_META_CODE 5 /* green, red, blue, alpha, distance */
#define NUM_LITERAL_CODES 256
#define NUM_LENGTH_CODES 24
#define NUM_DISTANCE_CODES 40
#define NUM_SHORT_DISTANCES 120
#define MAX_HUFFMAN_CODE_LENGTH 15
72 
73 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
77 };
78 
80  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
81 };
82 
/* {x, y} neighbor offsets for the short LZ77 distance codes 1..120.
 * The decode loop converts an offset pair to a linear distance as
 * max(1, x + y * width) (see the LZ77 branch of the pixel decoder). */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    {  0, 1 }, {  1, 0 }, {  1, 1 }, { -1, 1 }, {  0, 2 }, {  2, 0 }, {  1, 2 }, { -1, 2 },
    {  2, 1 }, { -2, 1 }, {  2, 2 }, { -2, 2 }, {  0, 3 }, {  3, 0 }, {  1, 3 }, { -1, 3 },
    {  3, 1 }, { -3, 1 }, {  2, 3 }, { -2, 3 }, {  3, 2 }, { -3, 2 }, {  0, 4 }, {  4, 0 },
    {  1, 4 }, { -1, 4 }, {  4, 1 }, { -4, 1 }, {  3, 3 }, { -3, 3 }, {  2, 4 }, { -2, 4 },
    {  4, 2 }, { -4, 2 }, {  0, 5 }, {  3, 4 }, { -3, 4 }, {  4, 3 }, { -4, 3 }, {  5, 0 },
    {  1, 5 }, { -1, 5 }, {  5, 1 }, { -5, 1 }, {  2, 5 }, { -2, 5 }, {  5, 2 }, { -5, 2 },
    {  4, 4 }, { -4, 4 }, {  3, 5 }, { -3, 5 }, {  5, 3 }, { -5, 3 }, {  0, 6 }, {  6, 0 },
    {  1, 6 }, { -1, 6 }, {  6, 1 }, { -6, 1 }, {  2, 6 }, { -2, 6 }, {  6, 2 }, { -6, 2 },
    {  4, 5 }, { -4, 5 }, {  5, 4 }, { -5, 4 }, {  3, 6 }, { -3, 6 }, {  6, 3 }, { -6, 3 },
    {  0, 7 }, {  7, 0 }, {  1, 7 }, { -1, 7 }, {  5, 5 }, { -5, 5 }, {  7, 1 }, { -7, 1 },
    {  4, 6 }, { -4, 6 }, {  6, 4 }, { -6, 4 }, {  2, 7 }, { -2, 7 }, {  7, 2 }, { -7, 2 },
    {  3, 7 }, { -3, 7 }, {  7, 3 }, { -7, 3 }, {  5, 6 }, { -5, 6 }, {  6, 5 }, { -6, 5 },
    {  8, 0 }, {  4, 7 }, { -4, 7 }, {  7, 4 }, { -7, 4 }, {  8, 1 }, {  8, 2 }, {  6, 6 },
    { -6, 6 }, {  8, 3 }, {  5, 7 }, { -5, 7 }, {  7, 5 }, { -7, 5 }, {  8, 4 }, {  6, 7 },
    { -6, 7 }, {  7, 6 }, { -7, 6 }, {  8, 5 }, {  7, 7 }, { -7, 7 }, {  8, 6 }, {  8, 7 }
};
100 
104 };
105 
111 };
112 
118 };
119 
135 };
136 
143 };
144 
145 /* The structure of WebP lossless is an optional series of transformation data,
146  * followed by the primary image. The primary image also optionally contains
147  * an entropy group mapping if there are multiple entropy groups. There is a
148  * basic image type called an "entropy coded image" that is used for all of
149  * these. The type of each entropy coded image is referred to by the
150  * specification as its role. */
151 enum ImageRole {
152  /* Primary Image: Stores the actual pixels of the image. */
154 
155  /* Entropy Image: Defines which Huffman group to use for different areas of
156  * the primary image. */
158 
159  /* Predictors: Defines which predictor type to use for different areas of
160  * the primary image. */
162 
163  /* Color Transform Data: Defines the color transformation for different
164  * areas of the primary image. */
166 
167  /* Color Index: Stored as an image of height == 1. */
169 
171 };
172 
/* Reader for one Huffman code. Codes with only one or two used symbols are
 * decoded in "simple" mode (direct lookup) because the VLC reader cannot
 * represent a single-symbol code; all other codes use the VLC table. */
typedef struct HuffReader {
    VLC  vlc;                   /* Huffman decoder context */
    int  simple;                /* whether to use simple mode */
    int  nb_symbols;            /* number of coded symbols */
    uint16_t simple_symbols[2]; /* symbols for simple mode */
} HuffReader;
179 
180 typedef struct ImageContext {
181  enum ImageRole role; /* role of this image */
182  AVFrame *frame; /* AVFrame for data */
183  int color_cache_bits; /* color cache size, log2 */
184  uint32_t *color_cache; /* color cache data */
185  int nb_huffman_groups; /* number of huffman groups */
186  HuffReader *huffman_groups; /* reader for each huffman group */
187  /* relative size compared to primary image, log2.
188  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
189  * number of pixels per byte in the primary image (pixel packing) */
192 } ImageContext;
193 
194 typedef struct WebPContext {
195  VP8Context v; /* VP8 Context used for lossy decoding */
196  GetBitContext gb; /* bitstream reader for main image chunk */
197  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
198  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
199  AVCodecContext *avctx; /* parent AVCodecContext */
200  int initialized; /* set once the VP8 context is initialized */
201  int has_alpha; /* has a separate alpha chunk */
202  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
203  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
204  const uint8_t *alpha_data; /* alpha chunk data */
205  int alpha_data_size; /* alpha chunk data size */
206  int has_exif; /* set after an EXIF chunk has been processed */
207  int has_iccp; /* set after an ICCP chunk has been processed */
208  int width; /* image width */
209  int height; /* image height */
210  int lossless; /* indicates lossless or lossy */
211 
212  int nb_transforms; /* number of transforms */
213  enum TransformType transforms[4]; /* transformations used in the image, in order */
214  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
215  * before pixels are unpacked, or same as width otherwise. */
217  int nb_huffman_groups; /* number of huffman groups in the primary image */
218  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
219 } WebPContext;
220 
/* Address of the 4-byte ARGB pixel at (x, y) in an AVFrame.
 * All macro arguments are parenthesized in the expansion so that expression
 * arguments (e.g. `&f` for frame, or `i + 1` for c) expand correctly. */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* Single component c (0=A, 1=R, 2=G, 3=B) of the pixel at (x, y). */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
226 
228 {
229  int i, j;
230 
231  av_free(img->color_cache);
232  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
233  av_frame_free(&img->frame);
234  if (img->huffman_groups) {
235  for (i = 0; i < img->nb_huffman_groups; i++) {
236  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
237  ff_vlc_free(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
238  }
239  av_free(img->huffman_groups);
240  }
241  memset(img, 0, sizeof(*img));
242 }
243 
245 {
246  if (r->simple) {
247  if (r->nb_symbols == 1)
248  return r->simple_symbols[0];
249  else
250  return r->simple_symbols[get_bits1(gb)];
251  } else
252  return get_vlc2(gb, r->vlc.table, 8, 2);
253 }
254 
255 static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
256  int alphabet_size)
257 {
258  int len = 0, sym, code = 0, ret;
259  int max_code_length = 0;
260  uint16_t *codes;
261 
262  /* special-case 1 symbol since the vlc reader cannot handle it */
263  for (sym = 0; sym < alphabet_size; sym++) {
264  if (code_lengths[sym] > 0) {
265  len++;
266  code = sym;
267  if (len > 1)
268  break;
269  }
270  }
271  if (len == 1) {
272  r->nb_symbols = 1;
273  r->simple_symbols[0] = code;
274  r->simple = 1;
275  return 0;
276  }
277 
278  for (sym = 0; sym < alphabet_size; sym++)
279  max_code_length = FFMAX(max_code_length, code_lengths[sym]);
280 
281  if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
282  return AVERROR(EINVAL);
283 
284  codes = av_malloc_array(alphabet_size, sizeof(*codes));
285  if (!codes)
286  return AVERROR(ENOMEM);
287 
288  code = 0;
289  r->nb_symbols = 0;
290  for (len = 1; len <= max_code_length; len++) {
291  for (sym = 0; sym < alphabet_size; sym++) {
292  if (code_lengths[sym] != len)
293  continue;
294  codes[sym] = code++;
295  r->nb_symbols++;
296  }
297  code <<= 1;
298  }
299  if (!r->nb_symbols) {
300  av_free(codes);
301  return AVERROR_INVALIDDATA;
302  }
303 
304  ret = vlc_init(&r->vlc, 8, alphabet_size,
305  code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
306  codes, sizeof(*codes), sizeof(*codes), VLC_INIT_OUTPUT_LE);
307  if (ret < 0) {
308  av_free(codes);
309  return ret;
310  }
311  r->simple = 0;
312 
313  av_free(codes);
314  return 0;
315 }
316 
318 {
319  hc->nb_symbols = get_bits1(&s->gb) + 1;
320 
321  if (get_bits1(&s->gb))
322  hc->simple_symbols[0] = get_bits(&s->gb, 8);
323  else
324  hc->simple_symbols[0] = get_bits1(&s->gb);
325 
326  if (hc->nb_symbols == 2)
327  hc->simple_symbols[1] = get_bits(&s->gb, 8);
328 
329  hc->simple = 1;
330 }
331 
333  int alphabet_size)
334 {
335  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
336  uint8_t *code_lengths;
337  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
338  int i, symbol, max_symbol, prev_code_len, ret;
339  int num_codes = 4 + get_bits(&s->gb, 4);
340 
341  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
342 
343  for (i = 0; i < num_codes; i++)
344  code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
345 
346  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
348  if (ret < 0)
349  return ret;
350 
351  code_lengths = av_mallocz(alphabet_size);
352  if (!code_lengths) {
353  ret = AVERROR(ENOMEM);
354  goto finish;
355  }
356 
357  if (get_bits1(&s->gb)) {
358  int bits = 2 + 2 * get_bits(&s->gb, 3);
359  max_symbol = 2 + get_bits(&s->gb, bits);
360  if (max_symbol > alphabet_size) {
361  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
362  max_symbol, alphabet_size);
364  goto finish;
365  }
366  } else {
367  max_symbol = alphabet_size;
368  }
369 
370  prev_code_len = 8;
371  symbol = 0;
372  while (symbol < alphabet_size) {
373  int code_len;
374 
375  if (!max_symbol--)
376  break;
377  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
378  if (code_len < 16) {
379  /* Code length code [0..15] indicates literal code lengths. */
380  code_lengths[symbol++] = code_len;
381  if (code_len)
382  prev_code_len = code_len;
383  } else {
384  int repeat = 0, length = 0;
385  switch (code_len) {
386  case 16:
387  /* Code 16 repeats the previous non-zero value [3..6] times,
388  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
389  * non-zero value has been emitted, a value of 8 is repeated. */
390  repeat = 3 + get_bits(&s->gb, 2);
391  length = prev_code_len;
392  break;
393  case 17:
394  /* Code 17 emits a streak of zeros [3..10], i.e.,
395  * 3 + ReadBits(3) times. */
396  repeat = 3 + get_bits(&s->gb, 3);
397  break;
398  case 18:
399  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
400  * 11 + ReadBits(7) times. */
401  repeat = 11 + get_bits(&s->gb, 7);
402  break;
403  }
404  if (symbol + repeat > alphabet_size) {
405  av_log(s->avctx, AV_LOG_ERROR,
406  "invalid symbol %d + repeat %d > alphabet size %d\n",
407  symbol, repeat, alphabet_size);
409  goto finish;
410  }
411  while (repeat-- > 0)
412  code_lengths[symbol++] = length;
413  }
414  }
415 
416  ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);
417 
418 finish:
419  ff_vlc_free(&code_len_hc.vlc);
420  av_free(code_lengths);
421  return ret;
422 }
423 
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                                      int w, int h);

/* Read a 3-bit block-size exponent from the bitstream and compute the
 * dimensions, in blocks, of a (w x h) image tiled into 2^block_bits blocks.
 * Writes into block_bits, blocks_w, blocks_h declared by the caller. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
432 
434 {
435  ImageContext *img;
436  int ret, block_bits, blocks_w, blocks_h, x, y, max;
437 
438  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
439 
440  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
441  if (ret < 0)
442  return ret;
443 
444  img = &s->image[IMAGE_ROLE_ENTROPY];
445  img->size_reduction = block_bits;
446 
447  /* the number of huffman groups is determined by the maximum group number
448  * coded in the entropy image */
449  max = 0;
450  for (y = 0; y < img->frame->height; y++) {
451  for (x = 0; x < img->frame->width; x++) {
452  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
453  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
454  int p = p0 << 8 | p1;
455  max = FFMAX(max, p);
456  }
457  }
458  s->nb_huffman_groups = max + 1;
459 
460  return 0;
461 }
462 
464 {
465  int block_bits, blocks_w, blocks_h, ret;
466 
467  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
468 
470  blocks_h);
471  if (ret < 0)
472  return ret;
473 
474  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
475 
476  return 0;
477 }
478 
480 {
481  int block_bits, blocks_w, blocks_h, ret;
482 
483  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
484 
486  blocks_h);
487  if (ret < 0)
488  return ret;
489 
490  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
491 
492  return 0;
493 }
494 
496 {
497  ImageContext *img;
498  int width_bits, index_size, ret, x;
499  uint8_t *ct;
500 
501  index_size = get_bits(&s->gb, 8) + 1;
502 
503  if (index_size <= 2)
504  width_bits = 3;
505  else if (index_size <= 4)
506  width_bits = 2;
507  else if (index_size <= 16)
508  width_bits = 1;
509  else
510  width_bits = 0;
511 
513  index_size, 1);
514  if (ret < 0)
515  return ret;
516 
517  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
518  img->size_reduction = width_bits;
519  if (width_bits > 0)
520  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
521 
522  /* color index values are delta-coded */
523  ct = img->frame->data[0] + 4;
524  for (x = 4; x < img->frame->width * 4; x++, ct++)
525  ct[0] += ct[-4];
526 
527  return 0;
528 }
529 
531  int x, int y)
532 {
533  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
534  int group = 0;
535 
536  if (gimg->size_reduction > 0) {
537  int group_x = x >> gimg->size_reduction;
538  int group_y = y >> gimg->size_reduction;
539  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
540  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
541  group = g0 << 8 | g1;
542  }
543 
544  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
545 }
546 
548 {
549  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
550  img->color_cache[cache_idx] = c;
551 }
552 
554  int w, int h)
555 {
556  ImageContext *img;
557  HuffReader *hg;
558  int i, j, ret, x, y, width;
559 
560  img = &s->image[role];
561  img->role = role;
562 
563  if (!img->frame) {
564  img->frame = av_frame_alloc();
565  if (!img->frame)
566  return AVERROR(ENOMEM);
567  }
568 
569  img->frame->format = AV_PIX_FMT_ARGB;
570  img->frame->width = w;
571  img->frame->height = h;
572 
573  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
574  ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
575  } else
576  ret = av_frame_get_buffer(img->frame, 1);
577  if (ret < 0)
578  return ret;
579 
580  if (get_bits1(&s->gb)) {
581  img->color_cache_bits = get_bits(&s->gb, 4);
582  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
583  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
584  img->color_cache_bits);
585  return AVERROR_INVALIDDATA;
586  }
587  img->color_cache = av_calloc(1 << img->color_cache_bits,
588  sizeof(*img->color_cache));
589  if (!img->color_cache)
590  return AVERROR(ENOMEM);
591  } else {
592  img->color_cache_bits = 0;
593  }
594 
595  img->nb_huffman_groups = 1;
596  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
598  if (ret < 0)
599  return ret;
600  img->nb_huffman_groups = s->nb_huffman_groups;
601  }
602  img->huffman_groups = av_calloc(img->nb_huffman_groups,
604  sizeof(*img->huffman_groups));
605  if (!img->huffman_groups)
606  return AVERROR(ENOMEM);
607 
608  for (i = 0; i < img->nb_huffman_groups; i++) {
609  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
610  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
611  int alphabet_size = alphabet_sizes[j];
612  if (!j && img->color_cache_bits > 0)
613  alphabet_size += 1 << img->color_cache_bits;
614 
615  if (get_bits1(&s->gb)) {
616  read_huffman_code_simple(s, &hg[j]);
617  } else {
618  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
619  if (ret < 0)
620  return ret;
621  }
622  }
623  }
624 
625  width = img->frame->width;
626  if (role == IMAGE_ROLE_ARGB)
627  width = s->reduced_width;
628 
629  x = 0; y = 0;
630  while (y < img->frame->height) {
631  int v;
632 
633  if (get_bits_left(&s->gb) < 0)
634  return AVERROR_INVALIDDATA;
635 
636  hg = get_huffman_group(s, img, x, y);
637  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
638  if (v < NUM_LITERAL_CODES) {
639  /* literal pixel values */
640  uint8_t *p = GET_PIXEL(img->frame, x, y);
641  p[2] = v;
642  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
643  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
644  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
645  if (img->color_cache_bits)
647  x++;
648  if (x == width) {
649  x = 0;
650  y++;
651  }
652  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
653  /* LZ77 backwards mapping */
654  int prefix_code, length, distance, ref_x, ref_y;
655 
656  /* parse length and distance */
657  prefix_code = v - NUM_LITERAL_CODES;
658  if (prefix_code < 4) {
659  length = prefix_code + 1;
660  } else {
661  int extra_bits = (prefix_code - 2) >> 1;
662  int offset = 2 + (prefix_code & 1) << extra_bits;
663  length = offset + get_bits(&s->gb, extra_bits) + 1;
664  }
665  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
666  if (prefix_code > 39U) {
667  av_log(s->avctx, AV_LOG_ERROR,
668  "distance prefix code too large: %d\n", prefix_code);
669  return AVERROR_INVALIDDATA;
670  }
671  if (prefix_code < 4) {
672  distance = prefix_code + 1;
673  } else {
674  int extra_bits = prefix_code - 2 >> 1;
675  int offset = 2 + (prefix_code & 1) << extra_bits;
676  distance = offset + get_bits(&s->gb, extra_bits) + 1;
677  }
678 
679  /* find reference location */
680  if (distance <= NUM_SHORT_DISTANCES) {
681  int xi = lz77_distance_offsets[distance - 1][0];
682  int yi = lz77_distance_offsets[distance - 1][1];
683  distance = FFMAX(1, xi + yi * width);
684  } else {
686  }
687  ref_x = x;
688  ref_y = y;
689  if (distance <= x) {
690  ref_x -= distance;
691  distance = 0;
692  } else {
693  ref_x = 0;
694  distance -= x;
695  }
696  while (distance >= width) {
697  ref_y--;
698  distance -= width;
699  }
700  if (distance > 0) {
701  ref_x = width - distance;
702  ref_y--;
703  }
704  ref_x = FFMAX(0, ref_x);
705  ref_y = FFMAX(0, ref_y);
706 
707  if (ref_y == y && ref_x >= x)
708  return AVERROR_INVALIDDATA;
709 
710  /* copy pixels
711  * source and dest regions can overlap and wrap lines, so just
712  * copy per-pixel */
713  for (i = 0; i < length; i++) {
714  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
715  uint8_t *p = GET_PIXEL(img->frame, x, y);
716 
717  AV_COPY32(p, p_ref);
718  if (img->color_cache_bits)
720  x++;
721  ref_x++;
722  if (x == width) {
723  x = 0;
724  y++;
725  }
726  if (ref_x == width) {
727  ref_x = 0;
728  ref_y++;
729  }
730  if (y == img->frame->height || ref_y == img->frame->height)
731  break;
732  }
733  } else {
734  /* read from color cache */
735  uint8_t *p = GET_PIXEL(img->frame, x, y);
736  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
737 
738  if (!img->color_cache_bits) {
739  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
740  return AVERROR_INVALIDDATA;
741  }
742  if (cache_idx >= 1 << img->color_cache_bits) {
743  av_log(s->avctx, AV_LOG_ERROR,
744  "color cache index out-of-bounds\n");
745  return AVERROR_INVALIDDATA;
746  }
747  AV_WB32(p, img->color_cache[cache_idx]);
748  x++;
749  if (x == width) {
750  x = 0;
751  y++;
752  }
753  }
754  }
755 
756  return 0;
757 }
758 
/* PRED_MODE_BLACK: predict opaque black (A=0xFF, R=G=B=0) for every pixel */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_WB32(p, 0xFF000000);
}
765 
/* PRED_MODE_L: predict the left neighbor */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_l);
}
772 
/* PRED_MODE_T: predict the top neighbor */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_t);
}
779 
/* PRED_MODE_TR: predict the top-right neighbor */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tr);
}
786 
/* PRED_MODE_TL: predict the top-left neighbor */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tl);
}
793 
/* PRED_MODE_AVG_T_AVG_L_TR: per component, average the top neighbor with
 * the average of the left and top-right neighbors */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (p_t[i] + ((p_l[i] + p_tr[i]) >> 1)) >> 1;
}
803 
/* PRED_MODE_AVG_L_TL: per component, average of left and top-left neighbors */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_tl[i]) >> 1;
}
813 
/* PRED_MODE_AVG_L_T: per component, average of left and top neighbors */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_t[i]) >> 1;
}
823 
/* PRED_MODE_AVG_TL_T: per component, average of top-left and top neighbors */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (p_tl[i] + p_t[i]) >> 1;
}
833 
/* PRED_MODE_AVG_T_TR: per component, average of top and top-right neighbors */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (p_t[i] + p_tr[i]) >> 1;
}
843 
/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR: per component, average of
 * avg(left, top-left) and avg(top, top-right) */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = (((p_l[i] + p_tl[i]) >> 1) + ((p_t[i] + p_tr[i]) >> 1)) >> 1;
}
853 
/* PRED_MODE_SELECT: copy whichever of the left or top neighbor is closer to
 * the top-left neighbor, summed over all four components (diff <= 0 means
 * the top pixel matches at least as well as the left one). */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    if (diff <= 0)
        AV_COPY32(p, p_t);
    else
        AV_COPY32(p, p_l);
}
867 
/* PRED_MODE_ADD_SUBTRACT_FULL: gradient predictor; per component,
 * left + top - top-left, clamped to [0, 255] */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = av_clip_uint8(p_l[i] + p_t[i] - p_tl[i]);
}
877 
878 static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
879 {
880  int d = a + b >> 1;
881  return av_clip_uint8(d + (d - c) / 2);
882 }
883 
/* PRED_MODE_ADD_SUBTRACT_HALF: per component, clamp_add_subtract_half()
 * applied to (left, top, top-left) */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    for (i = 0; i < 4; i++)
        p[i] = clamp_add_subtract_half(p_l[i], p_t[i], p_tl[i]);
}
893 
/* Signature shared by all inverse predictor implementations above:
 * write the 4-component prediction for the current pixel into p, given the
 * left, top-left, top and top-right neighbor pixels. */
typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
                                 const uint8_t *p_tl, const uint8_t *p_t,
                                 const uint8_t *p_tr);
897 
898 static const inv_predict_func inverse_predict[14] = {
903 };
904 
905 static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
906 {
907  uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
908  uint8_t p[4];
909 
910  dec = GET_PIXEL(frame, x, y);
911  p_l = GET_PIXEL(frame, x - 1, y);
912  p_tl = GET_PIXEL(frame, x - 1, y - 1);
913  p_t = GET_PIXEL(frame, x, y - 1);
914  if (x == frame->width - 1)
915  p_tr = GET_PIXEL(frame, 0, y);
916  else
917  p_tr = GET_PIXEL(frame, x + 1, y - 1);
918 
919  inverse_predict[m](p, p_l, p_tl, p_t, p_tr);
920 
921  dec[0] += p[0];
922  dec[1] += p[1];
923  dec[2] += p[2];
924  dec[3] += p[3];
925 }
926 
928 {
929  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
930  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
931  int x, y;
932 
933  for (y = 0; y < img->frame->height; y++) {
934  for (x = 0; x < s->reduced_width; x++) {
935  int tx = x >> pimg->size_reduction;
936  int ty = y >> pimg->size_reduction;
937  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
938 
939  if (x == 0) {
940  if (y == 0)
941  m = PRED_MODE_BLACK;
942  else
943  m = PRED_MODE_T;
944  } else if (y == 0)
945  m = PRED_MODE_L;
946 
947  if (m > 13) {
948  av_log(s->avctx, AV_LOG_ERROR,
949  "invalid predictor mode: %d\n", m);
950  return AVERROR_INVALIDDATA;
951  }
952  inverse_prediction(img->frame, m, x, y);
953  }
954  }
955  return 0;
956 }
957 
/* Per-component delta of the lossless color transform: multiply the transform
 * coefficient and the color value as signed 8-bit integers and keep the high
 * bits (arithmetic shift by 5, as specified by WebP lossless). */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
963 
965 {
966  ImageContext *img, *cimg;
967  int x, y, cx, cy;
968  uint8_t *p, *cp;
969 
970  img = &s->image[IMAGE_ROLE_ARGB];
971  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
972 
973  for (y = 0; y < img->frame->height; y++) {
974  for (x = 0; x < s->reduced_width; x++) {
975  cx = x >> cimg->size_reduction;
976  cy = y >> cimg->size_reduction;
977  cp = GET_PIXEL(cimg->frame, cx, cy);
978  p = GET_PIXEL(img->frame, x, y);
979 
980  p[1] += color_transform_delta(cp[3], p[2]);
981  p[3] += color_transform_delta(cp[2], p[2]) +
982  color_transform_delta(cp[1], p[1]);
983  }
984  }
985  return 0;
986 }
987 
989 {
990  int x, y;
991  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
992 
993  for (y = 0; y < img->frame->height; y++) {
994  for (x = 0; x < s->reduced_width; x++) {
995  uint8_t *p = GET_PIXEL(img->frame, x, y);
996  p[1] += p[2];
997  p[3] += p[2];
998  }
999  }
1000  return 0;
1001 }
1002 
1004 {
1005  ImageContext *img;
1006  ImageContext *pal;
1007  int i, x, y;
1008  uint8_t *p;
1009 
1010  img = &s->image[IMAGE_ROLE_ARGB];
1011  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1012 
1013  if (pal->size_reduction > 0) { // undo pixel packing
1014  GetBitContext gb_g;
1015  uint8_t *line;
1016  int pixel_bits = 8 >> pal->size_reduction;
1017 
1018  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1019  if (!line)
1020  return AVERROR(ENOMEM);
1021 
1022  for (y = 0; y < img->frame->height; y++) {
1023  p = GET_PIXEL(img->frame, 0, y);
1024  memcpy(line, p, img->frame->linesize[0]);
1025  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1026  skip_bits(&gb_g, 16);
1027  i = 0;
1028  for (x = 0; x < img->frame->width; x++) {
1029  p = GET_PIXEL(img->frame, x, y);
1030  p[2] = get_bits(&gb_g, pixel_bits);
1031  i++;
1032  if (i == 1 << pal->size_reduction) {
1033  skip_bits(&gb_g, 24);
1034  i = 0;
1035  }
1036  }
1037  }
1038  av_free(line);
1039  s->reduced_width = s->width; // we are back to full size
1040  }
1041 
1042  // switch to local palette if it's worth initializing it
1043  if (img->frame->height * img->frame->width > 300) {
1044  uint8_t palette[256 * 4];
1045  const int size = pal->frame->width * 4;
1046  av_assert0(size <= 1024U);
1047  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1048  // set extra entries to transparent black
1049  memset(palette + size, 0, 256 * 4 - size);
1050  for (y = 0; y < img->frame->height; y++) {
1051  for (x = 0; x < img->frame->width; x++) {
1052  p = GET_PIXEL(img->frame, x, y);
1053  i = p[2];
1054  AV_COPY32(p, &palette[i * 4]);
1055  }
1056  }
1057  } else {
1058  for (y = 0; y < img->frame->height; y++) {
1059  for (x = 0; x < img->frame->width; x++) {
1060  p = GET_PIXEL(img->frame, x, y);
1061  i = p[2];
1062  if (i >= pal->frame->width) {
1063  AV_WB32(p, 0x00000000);
1064  } else {
1065  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1066  AV_COPY32(p, pi);
1067  }
1068  }
1069  }
1070  }
1071 
1072  return 0;
1073 }
1074 
1075 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1076 {
1077  WebPContext *s = avctx->priv_data;
1078  if (s->width && s->width != w) {
1079  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1080  s->width, w);
1081  }
1082  s->width = w;
1083  if (s->height && s->height != h) {
1084  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1085  s->height, h);
1086  }
1087  s->height = h;
1088 }
1089 
/*
 * Decode a VP8L (WebP lossless) bitstream into frame p.
 *
 * NOTE(review): extraction artifact — the opening signature line and several
 * hyperlinked statement lines are missing from this view. Per the file's
 * cross-reference index the full signature is:
 *   static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
 *                                        int *got_frame,
 *                                        const uint8_t *data_start,
 *                                        unsigned int data_size,
 *                                        int is_alpha_chunk)
 * Returns data_size on success, a negative AVERROR code on failure.
 */
1091  int *got_frame, const uint8_t *data_start,
1092  unsigned int data_size, int is_alpha_chunk)
1093 {
1094  WebPContext *s = avctx->priv_data;
1095  int w, h, ret, i, used;
1096 
/* a standalone VP8L image (not the compressed alpha plane of a lossy image) */
1097  if (!is_alpha_chunk) {
1098  s->lossless = 1;
1099  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1100  }
1101 
1102  ret = init_get_bits8(&s->gb, data_start, data_size);
1103  if (ret < 0)
1104  return ret;
1105 
1106  if (!is_alpha_chunk) {
/* 0x2F is the one-byte VP8L stream signature */
1107  if (get_bits(&s->gb, 8) != 0x2F) {
1108  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1109  return AVERROR_INVALIDDATA;
1110  }
1111 
/* dimensions are stored minus one in 14 bits each (max 16384) */
1112  w = get_bits(&s->gb, 14) + 1;
1113  h = get_bits(&s->gb, 14) + 1;
1114 
1115  update_canvas_size(avctx, w, h);
1116 
1117  ret = ff_set_dimensions(avctx, s->width, s->height);
1118  if (ret < 0)
1119  return ret;
1120 
1121  s->has_alpha = get_bits1(&s->gb);
1122 
/* 3-bit version field must be 0 */
1123  if (get_bits(&s->gb, 3) != 0x0) {
1124  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1125  return AVERROR_INVALIDDATA;
1126  }
1127  } else {
/* alpha chunks inherit the canvas size already set by the lossy decoder */
1128  if (!s->width || !s->height)
1129  return AVERROR_BUG;
1130  w = s->width;
1131  h = s->height;
1132  }
1133 
1134  /* parse transformations */
1135  s->nb_transforms = 0;
1136  s->reduced_width = s->width;
1137  used = 0;
/* each set bit announces one more transform header */
1138  while (get_bits1(&s->gb)) {
1139  enum TransformType transform = get_bits(&s->gb, 2);
/* 'used' is a bitmask rejecting duplicate transform types */
1140  if (used & (1 << transform)) {
1141  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1142  transform);
/* NOTE(review): the line assigning ret (likely AVERROR_INVALIDDATA) is
 * missing from this extraction */
1144  goto free_and_return;
1145  }
1146  used |= (1 << transform);
1147  s->transforms[s->nb_transforms++] = transform;
/* NOTE(review): the parse_transform_* calls inside this switch are missing
 * from this extraction (hyperlinked lines were dropped) */
1148  switch (transform) {
1149  case PREDICTOR_TRANSFORM:
1151  break;
1152  case COLOR_TRANSFORM:
1154  break;
1157  break;
1158  }
1159  if (ret < 0)
1160  goto free_and_return;
1161  }
1162 
1163  /* decode primary image */
1164  s->image[IMAGE_ROLE_ARGB].frame = p;
1165  if (is_alpha_chunk)
1166  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
/* NOTE(review): the decode_entropy_coded_image() call producing ret is
 * missing from this extraction */
1168  if (ret < 0)
1169  goto free_and_return;
1170 
1171  /* apply transformations */
/* transforms are undone in reverse of the order they were parsed */
1172  for (i = s->nb_transforms - 1; i >= 0; i--) {
/* NOTE(review): the apply_*_transform calls inside this switch are missing
 * from this extraction */
1173  switch (s->transforms[i]) {
1174  case PREDICTOR_TRANSFORM:
1176  break;
1177  case COLOR_TRANSFORM:
1179  break;
1180  case SUBTRACT_GREEN:
1182  break;
1185  break;
1186  }
1187  if (ret < 0)
1188  goto free_and_return;
1189  }
1190 
1191  *got_frame = 1;
1193  p->flags |= AV_FRAME_FLAG_KEY;
/* success: return the number of bytes consumed */
1194  ret = data_size;
1195 
/* common exit path: release every per-role image context */
1196 free_and_return:
1197  for (i = 0; i < IMAGE_ROLE_NB; i++)
1198  image_ctx_free(&s->image[i]);
1199 
1200  return ret;
1201 }
1202 
/*
 * Undo the alpha-plane prediction filter in place on frame->data[3].
 *
 * NOTE(review): extraction artifact — the signature line is missing here;
 * per the cross-reference index it is:
 *   static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
 */
1204 {
1205  int x, y, ls;
1206  uint8_t *dec;
1207 
/* linesize of the alpha plane */
1208  ls = frame->linesize[3];
1209 
/* filter first row using horizontal filter */
1210  /* filter first row using horizontal filter */
1211  dec = frame->data[3] + 1;
1212  for (x = 1; x < frame->width; x++, dec++)
1213  *dec += *(dec - 1);
1214 
1215  /* filter first column using vertical filter */
1216  dec = frame->data[3] + ls;
1217  for (y = 1; y < frame->height; y++, dec += ls)
1218  *dec += *(dec - ls);
1219 
1220  /* filter the rest using the specified filter */
1221  switch (m) {
/* NOTE(review): the "case ALPHA_FILTER_HORIZONTAL:" label line is missing
 * from this extraction; this first loop is its body (add left neighbour) */
1223  for (y = 1; y < frame->height; y++) {
1224  dec = frame->data[3] + y * ls + 1;
1225  for (x = 1; x < frame->width; x++, dec++)
1226  *dec += *(dec - 1);
1227  }
1228  break;
/* vertical: add the pixel directly above */
1229  case ALPHA_FILTER_VERTICAL:
1230  for (y = 1; y < frame->height; y++) {
1231  dec = frame->data[3] + y * ls + 1;
1232  for (x = 1; x < frame->width; x++, dec++)
1233  *dec += *(dec - ls);
1234  }
1235  break;
/* gradient: add clip(left + top - top-left) */
1236  case ALPHA_FILTER_GRADIENT:
1237  for (y = 1; y < frame->height; y++) {
1238  dec = frame->data[3] + y * ls + 1;
1239  for (x = 1; x < frame->width; x++, dec++)
1240  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1241  }
1242  break;
1243  }
1244 }
1245 
/*
 * Decode the ALPH chunk data into the alpha plane (data[3]) of frame p.
 *
 * NOTE(review): extraction artifact — the opening signature line is missing
 * here; the visible continuation lines show the remaining parameters. The
 * function appears to take (AVCodecContext *avctx, AVFrame *p, ...) — TODO
 * confirm against the full source.
 * Returns 0 on success, a negative AVERROR code on failure.
 */
1247  const uint8_t *data_start,
1248  unsigned int data_size)
1249 {
1250  WebPContext *s = avctx->priv_data;
1251  int x, y, ret;
1252 
/* uncompressed alpha: raw bytes, one row of s->width per image row */
1253  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1254  GetByteContext gb;
1255 
1256  bytestream2_init(&gb, data_start, data_size);
1257  for (y = 0; y < s->height; y++)
1258  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1259  s->width);
/* VP8L-compressed alpha: decode a full lossless image, then extract it */
1260  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1261  uint8_t *ap, *pp;
1262  int alpha_got_frame = 0;
1263 
1264  s->alpha_frame = av_frame_alloc();
1265  if (!s->alpha_frame)
1266  return AVERROR(ENOMEM);
1267 
/* is_alpha_chunk=1: reuse the lossless decoder on the alpha sub-stream */
1268  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1269  data_start, data_size, 1);
1270  if (ret < 0) {
1271  av_frame_free(&s->alpha_frame);
1272  return ret;
1273  }
1274  if (!alpha_got_frame) {
1275  av_frame_free(&s->alpha_frame);
1276  return AVERROR_INVALIDDATA;
1277  }
1278 
1279  /* copy green component of alpha image to alpha plane of primary image */
1280  for (y = 0; y < s->height; y++) {
/* +2 selects the green byte of each 4-byte ARGB pixel */
1281  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1282  pp = p->data[3] + p->linesize[3] * y;
1283  for (x = 0; x < s->width; x++) {
1284  *pp = *ap;
1285  pp++;
1286  ap += 4;
1287  }
1288  }
1289  av_frame_free(&s->alpha_frame);
1290  }
1291 
1292  /* apply alpha filtering */
1293  if (s->alpha_filter)
1294  alpha_inverse_prediction(p, s->alpha_filter);
1295 
1296  return 0;
1297 }
1298 
/*
 * Decode a lossy (VP8) WebP image, delegating to the VP8 decoder and then
 * decoding the optional ALPH chunk on top of it.
 *
 * NOTE(review): extraction artifact — the opening signature line is missing
 * here; per the cross-reference index it is:
 *   static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
 *                                     int *got_frame, uint8_t *data_start,
 *                                     unsigned int data_size)
 */
1300  int *got_frame, uint8_t *data_start,
1301  unsigned int data_size)
1302 {
1303  WebPContext *s = avctx->priv_data;
1304  int ret;
1305 
/* lazily initialize the embedded VP8 decoder on first use */
1306  if (!s->initialized) {
1307  ff_vp8_decode_init(avctx);
1308  s->initialized = 1;
/* tell the VP8 decoder it is running inside a WebP container */
1309  s->v.actually_webp = 1;
1310  }
1311  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1312  s->lossless = 0;
1313 
1314  if (data_size > INT_MAX) {
1315  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1316  return AVERROR_PATCHWELCOME;
1317  }
1318 
/* wrap the VP8 payload in the context's scratch packet (no copy) */
1319  av_packet_unref(s->pkt);
1320  s->pkt->data = data_start;
1321  s->pkt->size = data_size;
1322 
1323  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1324  if (ret < 0)
1325  return ret;
1326 
1327  if (!*got_frame)
1328  return AVERROR_INVALIDDATA;
1329 
1330  update_canvas_size(avctx, avctx->width, avctx->height);
1331 
/* alpha data was stashed earlier while parsing the ALPH chunk */
1332  if (s->has_alpha) {
1333  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1334  s->alpha_data_size);
1335  if (ret < 0)
1336  return ret;
1337  }
1338  return ret;
1339 }
1340 
/*
 * Top-level WebP decode entry point: parse the RIFF container and dispatch
 * each chunk (VP8, VP8L, VP8X, ALPH, EXIF, ICCP, ...) to the right handler.
 *
 * NOTE(review): extraction artifact — the opening signature line is missing
 * here; the visible parameters and the FF_CODEC_DECODE_CB convention imply
 * (AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt).
 * Returns avpkt->size on success, a negative AVERROR code on failure.
 */
1342  int *got_frame, AVPacket *avpkt)
1343 {
1344  WebPContext *s = avctx->priv_data;
1345  GetByteContext gb;
1346  int ret;
1347  uint32_t chunk_type, chunk_size;
1348  int vp8x_flags = 0;
1349 
/* reset per-frame state */
1350  s->avctx = avctx;
1351  s->width = 0;
1352  s->height = 0;
1353  *got_frame = 0;
1354  s->has_alpha = 0;
1355  s->has_exif = 0;
1356  s->has_iccp = 0;
1357  bytestream2_init(&gb, avpkt->data, avpkt->size);
1358 
/* minimum container size: "RIFF" + size + "WEBP" */
1359  if (bytestream2_get_bytes_left(&gb) < 12)
1360  return AVERROR_INVALIDDATA;
1361 
1362  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1363  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1364  return AVERROR_INVALIDDATA;
1365  }
1366 
1367  chunk_size = bytestream2_get_le32(&gb);
1368  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1369  return AVERROR_INVALIDDATA;
1370 
1371  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1372  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1373  return AVERROR_INVALIDDATA;
1374  }
1375 
/* iterate over RIFF sub-chunks: 4-byte tag + 4-byte size + payload */
1376  while (bytestream2_get_bytes_left(&gb) > 8) {
1377  char chunk_str[5] = { 0 };
1378 
1379  chunk_type = bytestream2_get_le32(&gb);
1380  chunk_size = bytestream2_get_le32(&gb);
/* guard against overflow in the padding addition below */
1381  if (chunk_size == UINT32_MAX)
1382  return AVERROR_INVALIDDATA;
/* RIFF chunks are padded to even sizes */
1383  chunk_size += chunk_size & 1;
1384 
1385  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1386  /* we seem to be running out of data, but it could also be that the
1387  bitstream has trailing junk leading to bogus chunk_size. */
1388  break;
1389  }
1390 
1391  switch (chunk_type) {
/* lossy image data; only the first image chunk is decoded */
1392  case MKTAG('V', 'P', '8', ' '):
1393  if (!*got_frame) {
1394  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1395  avpkt->data + bytestream2_tell(&gb),
1396  chunk_size);
1397  if (ret < 0)
1398  return ret;
1399  }
1400  bytestream2_skip(&gb, chunk_size);
1401  break;
/* lossless image data */
1402  case MKTAG('V', 'P', '8', 'L'):
1403  if (!*got_frame) {
1404  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1405  avpkt->data + bytestream2_tell(&gb),
1406  chunk_size, 0);
1407  if (ret < 0)
1408  return ret;
/* NOTE(review): one line (Doxygen line 1409) is missing from this
 * extraction at this point */
1410  }
1411  bytestream2_skip(&gb, chunk_size);
1412  break;
/* extended-format header: feature flags + canvas size */
1413  case MKTAG('V', 'P', '8', 'X'):
1414  if (s->width || s->height || *got_frame) {
1415  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1416  return AVERROR_INVALIDDATA;
1417  }
1418  vp8x_flags = bytestream2_get_byte(&gb);
1419  bytestream2_skip(&gb, 3);
/* canvas dimensions are stored minus one as 24-bit LE values */
1420  s->width = bytestream2_get_le24(&gb) + 1;
1421  s->height = bytestream2_get_le24(&gb) + 1;
1422  ret = av_image_check_size(s->width, s->height, 0, avctx);
1423  if (ret < 0)
1424  return ret;
1425  break;
/* alpha plane for a lossy image; payload is stashed for later decoding */
1426  case MKTAG('A', 'L', 'P', 'H'): {
1427  int alpha_header, filter_m, compression;
1428 
1429  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1430  av_log(avctx, AV_LOG_WARNING,
1431  "ALPHA chunk present, but alpha bit not set in the "
1432  "VP8X header\n");
1433  }
1434  if (chunk_size == 0) {
1435  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1436  return AVERROR_INVALIDDATA;
1437  }
1438  alpha_header = bytestream2_get_byte(&gb);
/* remember where the alpha payload lives; decoded in vp8_lossy_decode_frame */
1439  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1440  s->alpha_data_size = chunk_size - 1;
1441  bytestream2_skip(&gb, s->alpha_data_size);
1442 
/* header byte: bits 2-3 = pre-processing filter, bits 0-1 = compression */
1443  filter_m = (alpha_header >> 2) & 0x03;
1444  compression = alpha_header & 0x03;
1445 
1446  if (compression > ALPHA_COMPRESSION_VP8L) {
1447  av_log(avctx, AV_LOG_VERBOSE,
1448  "skipping unsupported ALPHA chunk\n");
1449  } else {
1450  s->has_alpha = 1;
1451  s->alpha_compression = compression;
1452  s->alpha_filter = filter_m;
1453  }
1454 
1455  break;
1456  }
/* Exif metadata: parse the embedded TIFF structure into frame metadata */
1457  case MKTAG('E', 'X', 'I', 'F'): {
1458  int le, ifd_offset, exif_offset = bytestream2_tell(&gb);
1459  AVDictionary *exif_metadata = NULL;
1460  GetByteContext exif_gb;
1461 
1462  if (s->has_exif) {
1463  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1464  goto exif_end;
1465  }
1466  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1467  av_log(avctx, AV_LOG_WARNING,
1468  "EXIF chunk present, but Exif bit not set in the "
1469  "VP8X header\n");
1470 
1471  s->has_exif = 1;
1472  bytestream2_init(&exif_gb, avpkt->data + exif_offset,
1473  avpkt->size - exif_offset);
1474  if (ff_tdecode_header(&exif_gb, &le, &ifd_offset) < 0) {
1475  av_log(avctx, AV_LOG_ERROR, "invalid TIFF header "
1476  "in Exif data\n");
1477  goto exif_end;
1478  }
1479 
1480  bytestream2_seek(&exif_gb, ifd_offset, SEEK_SET);
1481  if (ff_exif_decode_ifd(avctx, &exif_gb, le, 0, &exif_metadata) < 0) {
1482  av_log(avctx, AV_LOG_ERROR, "error decoding Exif data\n");
1483  goto exif_end;
1484  }
1485 
1486  av_dict_copy(&p->metadata, exif_metadata, 0);
1487 
/* Exif errors are non-fatal: free the dict, skip the chunk, carry on */
1488 exif_end:
1489  av_dict_free(&exif_metadata);
1490  bytestream2_skip(&gb, chunk_size);
1491  break;
1492  }
/* ICC color profile, exported as frame side data */
1493  case MKTAG('I', 'C', 'C', 'P'): {
1494  AVFrameSideData *sd;
1495 
1496  if (s->has_iccp) {
1497  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1498  bytestream2_skip(&gb, chunk_size);
1499  break;
1500  }
1501  if (!(vp8x_flags & VP8X_FLAG_ICC))
1502  av_log(avctx, AV_LOG_WARNING,
1503  "ICCP chunk present, but ICC Profile bit not set in the "
1504  "VP8X header\n");
1505 
1506  s->has_iccp = 1;
1507 
1508  ret = ff_frame_new_side_data(avctx, p, AV_FRAME_DATA_ICC_PROFILE, chunk_size, &sd);
1509  if (ret < 0)
1510  return ret;
1511 
/* sd may be NULL when the side data was overridden (see helper docs) */
1512  if (sd) {
1513  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1514  } else {
1515  bytestream2_skip(&gb, chunk_size);
1516  }
1517  break;
1518  }
/* features listed as unimplemented in the file header */
1519  case MKTAG('A', 'N', 'I', 'M'):
1520  case MKTAG('A', 'N', 'M', 'F'):
1521  case MKTAG('X', 'M', 'P', ' '):
1522  AV_WL32(chunk_str, chunk_type)
1523  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1524  chunk_str);
1525  bytestream2_skip(&gb, chunk_size);
1526  break;
1527  default:
1528  AV_WL32(chunk_str, chunk_type);
1529  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1530  chunk_str);
1531  bytestream2_skip(&gb, chunk_size);
1532  break;
1533  }
1534  }
1535 
1536  if (!*got_frame) {
1537  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1538  return AVERROR_INVALIDDATA;
1539  }
1540 
1541  return avpkt->size;
1542 }
1543 
/*
 * Decoder init: allocate the scratch AVPacket used to feed the VP8 decoder.
 *
 * NOTE(review): extraction artifact — the signature line is missing here;
 * per the cross-reference index it is:
 *   static av_cold int webp_decode_init(AVCodecContext *avctx)
 */
1545 {
1546  WebPContext *s = avctx->priv_data;
1547 
1548  s->pkt = av_packet_alloc();
1549  if (!s->pkt)
1550  return AVERROR(ENOMEM);
1551 
1552  return 0;
1553 }
1554 
/*
 * Decoder close: free the scratch packet and, if the embedded VP8 decoder
 * was ever initialized, tear it down too.
 *
 * NOTE(review): extraction artifact — the signature line is missing here;
 * per the cross-reference index it is:
 *   static av_cold int webp_decode_close(AVCodecContext *avctx)
 */
1556 {
1557  WebPContext *s = avctx->priv_data;
1558 
1559  av_packet_free(&s->pkt);
1560 
/* only release VP8 state that vp8_lossy_decode_frame actually created */
1561  if (s->initialized)
1562  return ff_vp8_decode_free(avctx);
1563 
1564  return 0;
1565 }
1566 
/*
 * Public codec descriptor registering this file as the "webp" decoder.
 *
 * NOTE(review): extraction artifact — the opening line
 * (const FFCodec ff_webp_decoder = {, per the cross-reference index) and
 * the .init / decode-callback / final caps member lines are missing from
 * this view.
 */
1568  .p.name = "webp",
1569  CODEC_LONG_NAME("WebP image"),
1570  .p.type = AVMEDIA_TYPE_VIDEO,
1571  .p.id = AV_CODEC_ID_WEBP,
1572  .priv_data_size = sizeof(WebPContext),
1575  .close = webp_decode_close,
1576  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1577  .caps_internal = FF_CODEC_CAP_ICC_PROFILES |
1579 };
WebPContext::width
int width
Definition: webp.c:208
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:197
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:429
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2854
HuffReader::vlc
VLC vlc
Definition: webp.c:174
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:869
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:70
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1299
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:304
color
Definition: vf_paletteuse.c:513
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:126
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:108
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:177
GetByteContext
Definition: bytestream.h:33
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:243
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:103
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:121
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:788
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:774
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AVFrame::width
int width
Definition: frame.h:461
w
uint8_t w
Definition: llviddspenc.c:38
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:224
AVPacket::data
uint8_t * data
Definition: packet.h:539
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:133
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:117
b
#define b
Definition: input.c:41
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:116
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:185
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:479
FFCodec
Definition: codec_internal.h:127
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:129
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:661
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, int alphabet_size)
Definition: webp.c:255
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:213
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:124
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:128
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, const uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1090
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
HuffReader::simple
int simple
Definition: webp.c:175
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:125
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:202
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:845
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:825
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:199
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:102
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:212
GetBitContext
Definition: get_bits.h:108
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1075
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:205
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:894
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:115
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:59
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:781
ff_webp_decoder
const FFCodec ff_webp_decoder
Definition: webp.c:1567
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:958
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:553
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:138
WebPContext::has_exif
int has_exif
Definition: webp.c:206
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:332
WebPContext::has_alpha
int has_alpha
Definition: webp.c:201
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
PredictionMode
PredictionMode
Definition: webp.c:120
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
ImageContext::frame
AVFrame * frame
Definition: webp.c:182
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1807
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:905
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
TransformType
TransformType
Definition: webp.c:113
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:130
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1049
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:66
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:79
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:547
bits
uint8_t bits
Definition: vp3data.h:128
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:69
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:855
vlc_init
#define vlc_init(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:62
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:65
ImageContext
Definition: webp.c:180
decode.h
get_bits.h
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:418
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:184
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:221
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:191
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:127
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1555
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:186
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:988
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:176
WebPContext::height
int height
Definition: webp.c:209
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:107
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:878
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:142
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:898
tiff_common.h
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:183
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:495
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1544
WebPContext::v
VP8Context v
Definition: webp.c:195
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:73
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:67
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:161
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
ff_vp8_decode_init
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2898
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1203
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:168
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:491
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:760
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:170
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:61
AVPacket::size
int size
Definition: packet.h:540
codec_internal.h
AlphaCompression
AlphaCompression
Definition: webp.c:101
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:114
ImageContext::size_reduction
int size_reduction
Definition: webp.c:190
size
int size
Definition: twinvq_data.h:10344
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2099
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
ImageContext::role
enum ImageRole role
Definition: webp.c:181
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:433
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:964
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:60
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:114
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
HuffReader
Definition: webp.c:173
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:463
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:131
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:110
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:217
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:795
WebPContext::lossless
int lossless
Definition: webp.c:210
WebPContext::reduced_width
int reduced_width
Definition: webp.c:216
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:68
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1806
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:198
AlphaFilter
AlphaFilter
Definition: webp.c:106
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:132
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:83
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
WebPContext::gb
GetBitContext gb
Definition: webp.c:196
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:927
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HuffmanIndex
HuffmanIndex
Definition: webp.c:137
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:226
len
int len
Definition: vorbis_enc_data.h:426
exif.h
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:815
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:244
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:885
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:218
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:805
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, const uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1246
AVCodecContext
main external API structure.
Definition: avcodec.h:451
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:140
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:157
VLC
Definition: vlc.h:36
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1341
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:707
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:229
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:227
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
WebPContext::initialized
int initialized
Definition: webp.c:200
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:1003
mem.h
WebPContext::alpha_data
const uint8_t * alpha_data
Definition: webp.c:204
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
VLC_INIT_OUTPUT_LE
#define VLC_INIT_OUTPUT_LE
Definition: vlc.h:188
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:71
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:109
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:427
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:122
WebPContext
Definition: webp.c:194
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AVPacket
This structure stores compressed data.
Definition: packet.h:516
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2840
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
VP8Context
Definition: vp8.h:161
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
ImageRole
ImageRole
Definition: webp.c:151
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:231
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:434
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:317
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:141
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:207
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:530
width
#define width
Definition: dsp.h:85
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:835
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:103
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:767
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:123
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:203
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:139
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:153
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:134
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:165