FFmpeg
webp.c
Go to the documentation of this file.
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 
45 #define BITSTREAM_READER_LE
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "exif.h"
49 #include "get_bits.h"
50 #include "internal.h"
51 #include "thread.h"
52 #include "vp8.h"
53 
54 #define VP8X_FLAG_ANIMATION 0x02
55 #define VP8X_FLAG_XMP_METADATA 0x04
56 #define VP8X_FLAG_EXIF_METADATA 0x08
57 #define VP8X_FLAG_ALPHA 0x10
58 #define VP8X_FLAG_ICC 0x20
59 
60 #define MAX_PALETTE_SIZE 256
61 #define MAX_CACHE_BITS 11
62 #define NUM_CODE_LENGTH_CODES 19
63 #define HUFFMAN_CODES_PER_META_CODE 5
64 #define NUM_LITERAL_CODES 256
65 #define NUM_LENGTH_CODES 24
66 #define NUM_DISTANCE_CODES 40
67 #define NUM_SHORT_DISTANCES 120
68 #define MAX_HUFFMAN_CODE_LENGTH 15
69 
70 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
74 };
75 
77  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
78 };
79 
/* {dx, dy} offsets for the 120 "short" LZ77 distance codes: decoded
 * distances 1..NUM_SHORT_DISTANCES index this table and are mapped to a
 * 2-D neighborhood (distance = dx + dy * width) instead of being used as
 * a plain linear distance. */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
    { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
    { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
    { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
    { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
    { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
    { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
    { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
    { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
    { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
    { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
    { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
    { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
    { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
    { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
};
97 
101 };
102 
108 };
109 
115 };
116 
132 };
133 
140 };
141 
142 /* The structure of WebP lossless is an optional series of transformation data,
143  * followed by the primary image. The primary image also optionally contains
144  * an entropy group mapping if there are multiple entropy groups. There is a
145  * basic image type called an "entropy coded image" that is used for all of
146  * these. The type of each entropy coded image is referred to by the
147  * specification as its role. */
148 enum ImageRole {
149  /* Primary Image: Stores the actual pixels of the image. */
151 
152  /* Entropy Image: Defines which Huffman group to use for different areas of
153  * the primary image. */
155 
156  /* Predictors: Defines which predictor type to use for different areas of
157  * the primary image. */
159 
160  /* Color Transform Data: Defines the color transformation for different
161  * areas of the primary image. */
163 
164  /* Color Index: Stored as an image of height == 1. */
166 
168 };
169 
170 typedef struct HuffReader {
171  VLC vlc; /* Huffman decoder context */
172  int simple; /* whether to use simple mode */
173  int nb_symbols; /* number of coded symbols */
174  uint16_t simple_symbols[2]; /* symbols for simple mode */
175 } HuffReader;
176 
177 typedef struct ImageContext {
178  enum ImageRole role; /* role of this image */
179  AVFrame *frame; /* AVFrame for data */
180  int color_cache_bits; /* color cache size, log2 */
181  uint32_t *color_cache; /* color cache data */
182  int nb_huffman_groups; /* number of huffman groups */
183  HuffReader *huffman_groups; /* reader for each huffman group */
184  /* relative size compared to primary image, log2.
185  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
186  * number of pixels per byte in the primary image (pixel packing) */
189 } ImageContext;
190 
191 typedef struct WebPContext {
192  VP8Context v; /* VP8 Context used for lossy decoding */
193  GetBitContext gb; /* bitstream reader for main image chunk */
194  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
195  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
196  AVCodecContext *avctx; /* parent AVCodecContext */
197  int initialized; /* set once the VP8 context is initialized */
198  int has_alpha; /* has a separate alpha chunk */
199  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
200  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
201  uint8_t *alpha_data; /* alpha chunk data */
202  int alpha_data_size; /* alpha chunk data size */
203  int has_exif; /* set after an EXIF chunk has been processed */
204  int has_iccp; /* set after an ICCP chunk has been processed */
205  int width; /* image width */
206  int height; /* image height */
207  int lossless; /* indicates lossless or lossy */
208 
209  int nb_transforms; /* number of transforms */
210  enum TransformType transforms[4]; /* transformations used in the image, in order */
211  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
212  * before pixels are unpacked, or same as width otherwise. */
214  int nb_huffman_groups; /* number of huffman groups in the primary image */
215  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
216 } WebPContext;
217 
/* Return a pointer to the 4-byte pixel at (x, y) in frame->data[0].
 * Component order as written by the lossless decoder: 0 = A, 1 = R,
 * 2 = G, 3 = B.
 * All macro parameters are fully parenthesized (the original left `frame`
 * and `c` bare, which misbinds for arguments such as `&f` or `i + 1`). */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* Access (as an lvalue) component c of the pixel at (x, y). */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
223 
225 {
226  int i, j;
227 
228  av_free(img->color_cache);
229  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
230  av_frame_free(&img->frame);
231  if (img->huffman_groups) {
232  for (i = 0; i < img->nb_huffman_groups; i++) {
233  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
234  ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
235  }
236  av_free(img->huffman_groups);
237  }
238  memset(img, 0, sizeof(*img));
239 }
240 
242 {
243  if (r->simple) {
244  if (r->nb_symbols == 1)
245  return r->simple_symbols[0];
246  else
247  return r->simple_symbols[get_bits1(gb)];
248  } else
249  return get_vlc2(gb, r->vlc.table, 8, 2);
250 }
251 
/**
 * Build a canonical Huffman decoder from an array of per-symbol code lengths.
 *
 * A single-symbol alphabet is handled in "simple" mode (the VLC reader cannot
 * represent a zero-bit code). Otherwise canonical codes are assigned in
 * (length, symbol) order and handed to init_vlc.
 *
 * @param r             reader to initialize
 * @param code_lengths  code length for each symbol; 0 means "not coded"
 * @param alphabet_size number of entries in code_lengths
 * @return 0 on success, a negative AVERROR value on failure
 */
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
                                       int alphabet_size)
{
    int len = 0, sym, code = 0, ret;
    int max_code_length = 0;
    uint16_t *codes;

    /* special-case 1 symbol since the vlc reader cannot handle it */
    for (sym = 0; sym < alphabet_size; sym++) {
        if (code_lengths[sym] > 0) {
            len++;
            code = sym; /* remember the coded symbol (reuses 'code' as scratch) */
            if (len > 1)
                break;
        }
    }
    if (len == 1) {
        r->nb_symbols = 1;
        r->simple_symbols[0] = code;
        r->simple = 1;
        return 0;
    }

    for (sym = 0; sym < alphabet_size; sym++)
        max_code_length = FFMAX(max_code_length, code_lengths[sym]);

    /* reject an empty code or lengths the 15-bit canonical scheme cannot hold */
    if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
        return AVERROR(EINVAL);

    codes = av_malloc_array(alphabet_size, sizeof(*codes));
    if (!codes)
        return AVERROR(ENOMEM);

    /* assign canonical codes: shorter codes first, ties broken by symbol
     * order; after each length the running code value is doubled */
    code = 0;
    r->nb_symbols = 0;
    for (len = 1; len <= max_code_length; len++) {
        for (sym = 0; sym < alphabet_size; sym++) {
            if (code_lengths[sym] != len)
                continue;
            codes[sym] = code++;
            r->nb_symbols++;
        }
        code <<= 1;
    }
    if (!r->nb_symbols) {
        av_free(codes);
        return AVERROR_INVALIDDATA;
    }

    ret = init_vlc(&r->vlc, 8, alphabet_size,
                   code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
                   codes, sizeof(*codes), sizeof(*codes), INIT_VLC_OUTPUT_LE);
    if (ret < 0) {
        av_free(codes);
        return ret;
    }
    r->simple = 0;

    /* 'codes' was only needed to seed the VLC table */
    av_free(codes);
    return 0;
}
313 
315 {
316  hc->nb_symbols = get_bits1(&s->gb) + 1;
317 
318  if (get_bits1(&s->gb))
319  hc->simple_symbols[0] = get_bits(&s->gb, 8);
320  else
321  hc->simple_symbols[0] = get_bits1(&s->gb);
322 
323  if (hc->nb_symbols == 2)
324  hc->simple_symbols[1] = get_bits(&s->gb, 8);
325 
326  hc->simple = 1;
327 }
328 
330  int alphabet_size)
331 {
332  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
333  uint8_t *code_lengths;
334  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
335  int i, symbol, max_symbol, prev_code_len, ret;
336  int num_codes = 4 + get_bits(&s->gb, 4);
337 
338  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
339 
340  for (i = 0; i < num_codes; i++)
341  code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
342 
343  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
345  if (ret < 0)
346  return ret;
347 
348  code_lengths = av_mallocz(alphabet_size);
349  if (!code_lengths) {
350  ret = AVERROR(ENOMEM);
351  goto finish;
352  }
353 
354  if (get_bits1(&s->gb)) {
355  int bits = 2 + 2 * get_bits(&s->gb, 3);
356  max_symbol = 2 + get_bits(&s->gb, bits);
357  if (max_symbol > alphabet_size) {
358  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
359  max_symbol, alphabet_size);
361  goto finish;
362  }
363  } else {
364  max_symbol = alphabet_size;
365  }
366 
367  prev_code_len = 8;
368  symbol = 0;
369  while (symbol < alphabet_size) {
370  int code_len;
371 
372  if (!max_symbol--)
373  break;
374  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
375  if (code_len < 16) {
376  /* Code length code [0..15] indicates literal code lengths. */
377  code_lengths[symbol++] = code_len;
378  if (code_len)
379  prev_code_len = code_len;
380  } else {
381  int repeat = 0, length = 0;
382  switch (code_len) {
383  case 16:
384  /* Code 16 repeats the previous non-zero value [3..6] times,
385  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
386  * non-zero value has been emitted, a value of 8 is repeated. */
387  repeat = 3 + get_bits(&s->gb, 2);
388  length = prev_code_len;
389  break;
390  case 17:
391  /* Code 17 emits a streak of zeros [3..10], i.e.,
392  * 3 + ReadBits(3) times. */
393  repeat = 3 + get_bits(&s->gb, 3);
394  break;
395  case 18:
396  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
397  * 11 + ReadBits(7) times. */
398  repeat = 11 + get_bits(&s->gb, 7);
399  break;
400  }
401  if (symbol + repeat > alphabet_size) {
402  av_log(s->avctx, AV_LOG_ERROR,
403  "invalid symbol %d + repeat %d > alphabet size %d\n",
404  symbol, repeat, alphabet_size);
406  goto finish;
407  }
408  while (repeat-- > 0)
409  code_lengths[symbol++] = length;
410  }
411  }
412 
413  ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);
414 
415 finish:
416  ff_free_vlc(&code_len_hc.vlc);
417  av_free(code_lengths);
418  return ret;
419 }
420 
421 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
422  int w, int h);
423 
/* Read a 3-bit size exponent from s->gb (block side = 1 << (bits + 2)) and
 * compute how many blocks are needed to cover a w x h area.
 * Expects block_bits, blocks_w and blocks_h to be in scope at the call site. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
429 
431 {
432  ImageContext *img;
433  int ret, block_bits, blocks_w, blocks_h, x, y, max;
434 
435  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
436 
437  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
438  if (ret < 0)
439  return ret;
440 
441  img = &s->image[IMAGE_ROLE_ENTROPY];
442  img->size_reduction = block_bits;
443 
444  /* the number of huffman groups is determined by the maximum group number
445  * coded in the entropy image */
446  max = 0;
447  for (y = 0; y < img->frame->height; y++) {
448  for (x = 0; x < img->frame->width; x++) {
449  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
450  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
451  int p = p0 << 8 | p1;
452  max = FFMAX(max, p);
453  }
454  }
455  s->nb_huffman_groups = max + 1;
456 
457  return 0;
458 }
459 
461 {
462  int block_bits, blocks_w, blocks_h, ret;
463 
464  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
465 
467  blocks_h);
468  if (ret < 0)
469  return ret;
470 
471  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
472 
473  return 0;
474 }
475 
477 {
478  int block_bits, blocks_w, blocks_h, ret;
479 
480  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
481 
483  blocks_h);
484  if (ret < 0)
485  return ret;
486 
487  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
488 
489  return 0;
490 }
491 
493 {
494  ImageContext *img;
495  int width_bits, index_size, ret, x;
496  uint8_t *ct;
497 
498  index_size = get_bits(&s->gb, 8) + 1;
499 
500  if (index_size <= 2)
501  width_bits = 3;
502  else if (index_size <= 4)
503  width_bits = 2;
504  else if (index_size <= 16)
505  width_bits = 1;
506  else
507  width_bits = 0;
508 
510  index_size, 1);
511  if (ret < 0)
512  return ret;
513 
514  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
515  img->size_reduction = width_bits;
516  if (width_bits > 0)
517  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
518 
519  /* color index values are delta-coded */
520  ct = img->frame->data[0] + 4;
521  for (x = 4; x < img->frame->width * 4; x++, ct++)
522  ct[0] += ct[-4];
523 
524  return 0;
525 }
526 
528  int x, int y)
529 {
530  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
531  int group = 0;
532 
533  if (gimg->size_reduction > 0) {
534  int group_x = x >> gimg->size_reduction;
535  int group_y = y >> gimg->size_reduction;
536  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
537  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
538  group = g0 << 8 | g1;
539  }
540 
541  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
542 }
543 
545 {
546  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
547  img->color_cache[cache_idx] = c;
548 }
549 
551  int w, int h)
552 {
553  ImageContext *img;
554  HuffReader *hg;
555  int i, j, ret, x, y, width;
556 
557  img = &s->image[role];
558  img->role = role;
559 
560  if (!img->frame) {
561  img->frame = av_frame_alloc();
562  if (!img->frame)
563  return AVERROR(ENOMEM);
564  }
565 
566  img->frame->format = AV_PIX_FMT_ARGB;
567  img->frame->width = w;
568  img->frame->height = h;
569 
570  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
571  ThreadFrame pt = { .f = img->frame };
572  ret = ff_thread_get_buffer(s->avctx, &pt, 0);
573  } else
574  ret = av_frame_get_buffer(img->frame, 1);
575  if (ret < 0)
576  return ret;
577 
578  if (get_bits1(&s->gb)) {
579  img->color_cache_bits = get_bits(&s->gb, 4);
580  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
581  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
582  img->color_cache_bits);
583  return AVERROR_INVALIDDATA;
584  }
585  img->color_cache = av_calloc(1 << img->color_cache_bits,
586  sizeof(*img->color_cache));
587  if (!img->color_cache)
588  return AVERROR(ENOMEM);
589  } else {
590  img->color_cache_bits = 0;
591  }
592 
593  img->nb_huffman_groups = 1;
594  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
596  if (ret < 0)
597  return ret;
598  img->nb_huffman_groups = s->nb_huffman_groups;
599  }
600  img->huffman_groups = av_calloc(img->nb_huffman_groups,
602  sizeof(*img->huffman_groups));
603  if (!img->huffman_groups)
604  return AVERROR(ENOMEM);
605 
606  for (i = 0; i < img->nb_huffman_groups; i++) {
607  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
608  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
609  int alphabet_size = alphabet_sizes[j];
610  if (!j && img->color_cache_bits > 0)
611  alphabet_size += 1 << img->color_cache_bits;
612 
613  if (get_bits1(&s->gb)) {
614  read_huffman_code_simple(s, &hg[j]);
615  } else {
616  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
617  if (ret < 0)
618  return ret;
619  }
620  }
621  }
622 
623  width = img->frame->width;
624  if (role == IMAGE_ROLE_ARGB)
625  width = s->reduced_width;
626 
627  x = 0; y = 0;
628  while (y < img->frame->height) {
629  int v;
630 
631  if (get_bits_left(&s->gb) < 0)
632  return AVERROR_INVALIDDATA;
633 
634  hg = get_huffman_group(s, img, x, y);
635  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
636  if (v < NUM_LITERAL_CODES) {
637  /* literal pixel values */
638  uint8_t *p = GET_PIXEL(img->frame, x, y);
639  p[2] = v;
640  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
641  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
642  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
643  if (img->color_cache_bits)
645  x++;
646  if (x == width) {
647  x = 0;
648  y++;
649  }
650  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
651  /* LZ77 backwards mapping */
652  int prefix_code, length, distance, ref_x, ref_y;
653 
654  /* parse length and distance */
655  prefix_code = v - NUM_LITERAL_CODES;
656  if (prefix_code < 4) {
657  length = prefix_code + 1;
658  } else {
659  int extra_bits = (prefix_code - 2) >> 1;
660  int offset = 2 + (prefix_code & 1) << extra_bits;
661  length = offset + get_bits(&s->gb, extra_bits) + 1;
662  }
663  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
664  if (prefix_code > 39U) {
665  av_log(s->avctx, AV_LOG_ERROR,
666  "distance prefix code too large: %d\n", prefix_code);
667  return AVERROR_INVALIDDATA;
668  }
669  if (prefix_code < 4) {
670  distance = prefix_code + 1;
671  } else {
672  int extra_bits = prefix_code - 2 >> 1;
673  int offset = 2 + (prefix_code & 1) << extra_bits;
674  distance = offset + get_bits(&s->gb, extra_bits) + 1;
675  }
676 
677  /* find reference location */
678  if (distance <= NUM_SHORT_DISTANCES) {
679  int xi = lz77_distance_offsets[distance - 1][0];
680  int yi = lz77_distance_offsets[distance - 1][1];
681  distance = FFMAX(1, xi + yi * width);
682  } else {
684  }
685  ref_x = x;
686  ref_y = y;
687  if (distance <= x) {
688  ref_x -= distance;
689  distance = 0;
690  } else {
691  ref_x = 0;
692  distance -= x;
693  }
694  while (distance >= width) {
695  ref_y--;
696  distance -= width;
697  }
698  if (distance > 0) {
699  ref_x = width - distance;
700  ref_y--;
701  }
702  ref_x = FFMAX(0, ref_x);
703  ref_y = FFMAX(0, ref_y);
704 
705  /* copy pixels
706  * source and dest regions can overlap and wrap lines, so just
707  * copy per-pixel */
708  for (i = 0; i < length; i++) {
709  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
710  uint8_t *p = GET_PIXEL(img->frame, x, y);
711 
712  AV_COPY32(p, p_ref);
713  if (img->color_cache_bits)
715  x++;
716  ref_x++;
717  if (x == width) {
718  x = 0;
719  y++;
720  }
721  if (ref_x == width) {
722  ref_x = 0;
723  ref_y++;
724  }
725  if (y == img->frame->height || ref_y == img->frame->height)
726  break;
727  }
728  } else {
729  /* read from color cache */
730  uint8_t *p = GET_PIXEL(img->frame, x, y);
731  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
732 
733  if (!img->color_cache_bits) {
734  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
735  return AVERROR_INVALIDDATA;
736  }
737  if (cache_idx >= 1 << img->color_cache_bits) {
738  av_log(s->avctx, AV_LOG_ERROR,
739  "color cache index out-of-bounds\n");
740  return AVERROR_INVALIDDATA;
741  }
742  AV_WB32(p, img->color_cache[cache_idx]);
743  x++;
744  if (x == width) {
745  x = 0;
746  y++;
747  }
748  }
749  }
750 
751  return 0;
752 }
753 
/* PRED_MODE_BLACK */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    /* constant opaque black: big-endian write yields bytes FF 00 00 00,
     * i.e. alpha = 0xFF, R = G = B = 0 in the decoder's ARGB byte order */
    AV_WB32(p, 0xFF000000);
}
760 
/* PRED_MODE_L */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    /* predict the left neighbor */
    AV_COPY32(p, p_l);
}
767 
/* PRED_MODE_T */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    /* predict the top neighbor */
    AV_COPY32(p, p_t);
}
774 
/* PRED_MODE_TR */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    /* predict the top-right neighbor */
    AV_COPY32(p, p_tr);
}
781 
/* PRED_MODE_TL */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    /* predict the top-left neighbor */
    AV_COPY32(p, p_tl);
}
788 
/* PRED_MODE_AVG_T_AVG_L_TR */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per channel: average of top and the average of left/top-right;
     * note '>>' binds looser than '+', so each shift halves the sum */
    for (i = 0; i < 4; i++)
        p[i] = p_t[i] + (p_l[i] + p_tr[i] >> 1) >> 1;
}
798 
/* PRED_MODE_AVG_L_TL */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per-channel average of the left and top-left neighbors */
    for (i = 0; i < 4; i++)
        p[i] = p_l[i] + p_tl[i] >> 1;
}
808 
/* PRED_MODE_AVG_L_T */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per-channel average of the left and top neighbors */
    for (i = 0; i < 4; i++)
        p[i] = p_l[i] + p_t[i] >> 1;
}
818 
/* PRED_MODE_AVG_TL_T */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per-channel average of the top-left and top neighbors */
    for (i = 0; i < 4; i++)
        p[i] = p_tl[i] + p_t[i] >> 1;
}
828 
/* PRED_MODE_AVG_T_TR */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per-channel average of the top and top-right neighbors */
    for (i = 0; i < 4; i++)
        p[i] = p_t[i] + p_tr[i] >> 1;
}
838 
/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int i;

    /* per channel: average of avg(L, TL) and avg(T, TR) */
    for (i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_tl[i] >> 1) + (p_t[i] + p_tr[i] >> 1) >> 1;
}
848 
/* PRED_MODE_SELECT */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    /* summed over all 4 channels: |L - TL| - |T - TL|; a non-positive sum
     * means the top pixel is the closer match, so predict T, else L */
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    if (diff <= 0)
        AV_COPY32(p, p_t);
    else
        AV_COPY32(p, p_l);
}
862 
/* PRED_MODE_ADD_SUBTRACT_FULL */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    /* gradient predictor: L + T - TL per channel, clamped to [0, 255] */
    p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
    p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
    p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
    p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
}
872 
/* Average a and b, then extrapolate away from c by half the difference,
 * clamped to [0, 255]. */
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
{
    int d = a + b >> 1; /* (a + b) / 2: '>>' binds looser than '+' */
    return av_clip_uint8(d + (d - c) / 2);
}
878 
/* PRED_MODE_ADD_SUBTRACT_HALF */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    /* per channel: clamped half-gradient of avg(L, T) relative to TL */
    p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
    p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
    p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
    p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
}
888 
889 typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
890  const uint8_t *p_tl, const uint8_t *p_t,
891  const uint8_t *p_tr);
892 
893 static const inv_predict_func inverse_predict[14] = {
898 };
899 
/* Undo the predictor transform for the pixel at (x, y): compute the
 * prediction from the already-reconstructed neighbors and add it (mod 256)
 * to the residual stored in the frame. Callers must remap m for the first
 * row/column so no neighbor is read out of bounds. */
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
{
    uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
    uint8_t p[4];

    /* gather the left, top-left and top neighbors */
    dec = GET_PIXEL(frame, x, y);
    p_l = GET_PIXEL(frame, x - 1, y);
    p_tl = GET_PIXEL(frame, x - 1, y - 1);
    p_t = GET_PIXEL(frame, x, y - 1);
    /* in the last column the top-right neighbor wraps to the first pixel
     * of the current row */
    if (x == frame->width - 1)
        p_tr = GET_PIXEL(frame, 0, y);
    else
        p_tr = GET_PIXEL(frame, x + 1, y - 1);

    inverse_predict[m](p, p_l, p_tl, p_t, p_tr);

    /* residual + prediction, with natural uint8 wraparound */
    dec[0] += p[0];
    dec[1] += p[1];
    dec[2] += p[2];
    dec[3] += p[3];
}
921 
923 {
924  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
925  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
926  int x, y;
927 
928  for (y = 0; y < img->frame->height; y++) {
929  for (x = 0; x < s->reduced_width; x++) {
930  int tx = x >> pimg->size_reduction;
931  int ty = y >> pimg->size_reduction;
932  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
933 
934  if (x == 0) {
935  if (y == 0)
936  m = PRED_MODE_BLACK;
937  else
938  m = PRED_MODE_T;
939  } else if (y == 0)
940  m = PRED_MODE_L;
941 
942  if (m > 13) {
943  av_log(s->avctx, AV_LOG_ERROR,
944  "invalid predictor mode: %d\n", m);
945  return AVERROR_INVALIDDATA;
946  }
947  inverse_prediction(img->frame, m, x, y);
948  }
949  }
950  return 0;
951 }
952 
/* Compute the color-transform delta: both bytes are reinterpreted as
 * signed 8-bit values, multiplied, and scaled down by 2^5; the result is
 * truncated to a byte when added to a pixel component by the caller. */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
958 
960 {
961  ImageContext *img, *cimg;
962  int x, y, cx, cy;
963  uint8_t *p, *cp;
964 
965  img = &s->image[IMAGE_ROLE_ARGB];
966  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
967 
968  for (y = 0; y < img->frame->height; y++) {
969  for (x = 0; x < s->reduced_width; x++) {
970  cx = x >> cimg->size_reduction;
971  cy = y >> cimg->size_reduction;
972  cp = GET_PIXEL(cimg->frame, cx, cy);
973  p = GET_PIXEL(img->frame, x, y);
974 
975  p[1] += color_transform_delta(cp[3], p[2]);
976  p[3] += color_transform_delta(cp[2], p[2]) +
977  color_transform_delta(cp[1], p[1]);
978  }
979  }
980  return 0;
981 }
982 
984 {
985  int x, y;
986  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
987 
988  for (y = 0; y < img->frame->height; y++) {
989  for (x = 0; x < s->reduced_width; x++) {
990  uint8_t *p = GET_PIXEL(img->frame, x, y);
991  p[1] += p[2];
992  p[3] += p[2];
993  }
994  }
995  return 0;
996 }
997 
999 {
1000  ImageContext *img;
1001  ImageContext *pal;
1002  int i, x, y;
1003  uint8_t *p;
1004 
1005  img = &s->image[IMAGE_ROLE_ARGB];
1006  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1007 
1008  if (pal->size_reduction > 0) { // undo pixel packing
1009  GetBitContext gb_g;
1010  uint8_t *line;
1011  int pixel_bits = 8 >> pal->size_reduction;
1012 
1013  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1014  if (!line)
1015  return AVERROR(ENOMEM);
1016 
1017  for (y = 0; y < img->frame->height; y++) {
1018  p = GET_PIXEL(img->frame, 0, y);
1019  memcpy(line, p, img->frame->linesize[0]);
1020  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1021  skip_bits(&gb_g, 16);
1022  i = 0;
1023  for (x = 0; x < img->frame->width; x++) {
1024  p = GET_PIXEL(img->frame, x, y);
1025  p[2] = get_bits(&gb_g, pixel_bits);
1026  i++;
1027  if (i == 1 << pal->size_reduction) {
1028  skip_bits(&gb_g, 24);
1029  i = 0;
1030  }
1031  }
1032  }
1033  av_free(line);
1034  s->reduced_width = s->width; // we are back to full size
1035  }
1036 
1037  // switch to local palette if it's worth initializing it
1038  if (img->frame->height * img->frame->width > 300) {
1039  uint8_t palette[256 * 4];
1040  const int size = pal->frame->width * 4;
1041  av_assert0(size <= 1024U);
1042  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1043  // set extra entries to transparent black
1044  memset(palette + size, 0, 256 * 4 - size);
1045  for (y = 0; y < img->frame->height; y++) {
1046  for (x = 0; x < img->frame->width; x++) {
1047  p = GET_PIXEL(img->frame, x, y);
1048  i = p[2];
1049  AV_COPY32(p, &palette[i * 4]);
1050  }
1051  }
1052  } else {
1053  for (y = 0; y < img->frame->height; y++) {
1054  for (x = 0; x < img->frame->width; x++) {
1055  p = GET_PIXEL(img->frame, x, y);
1056  i = p[2];
1057  if (i >= pal->frame->width) {
1058  AV_WB32(p, 0x00000000);
1059  } else {
1060  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1061  AV_COPY32(p, pi);
1062  }
1063  }
1064  }
1065  }
1066 
1067  return 0;
1068 }
1069 
/* Record the canvas dimensions parsed from a chunk, warning if they
 * disagree with dimensions seen earlier; the most recent values win. */
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
{
    WebPContext *s = avctx->priv_data;
    if (s->width && s->width != w) {
        av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
               s->width, w);
    }
    s->width = w;
    if (s->height && s->height != h) {
        av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
               s->height, h);
    }
    s->height = h;
}
1084 
1086  int *got_frame, uint8_t *data_start,
1087  unsigned int data_size, int is_alpha_chunk)
1088 {
1089  WebPContext *s = avctx->priv_data;
1090  int w, h, ret, i, used;
1091 
1092  if (!is_alpha_chunk) {
1093  s->lossless = 1;
1094  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1095  }
1096 
1097  ret = init_get_bits8(&s->gb, data_start, data_size);
1098  if (ret < 0)
1099  return ret;
1100 
1101  if (!is_alpha_chunk) {
1102  if (get_bits(&s->gb, 8) != 0x2F) {
1103  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1104  return AVERROR_INVALIDDATA;
1105  }
1106 
1107  w = get_bits(&s->gb, 14) + 1;
1108  h = get_bits(&s->gb, 14) + 1;
1109 
1110  update_canvas_size(avctx, w, h);
1111 
1112  ret = ff_set_dimensions(avctx, s->width, s->height);
1113  if (ret < 0)
1114  return ret;
1115 
1116  s->has_alpha = get_bits1(&s->gb);
1117 
1118  if (get_bits(&s->gb, 3) != 0x0) {
1119  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1120  return AVERROR_INVALIDDATA;
1121  }
1122  } else {
1123  if (!s->width || !s->height)
1124  return AVERROR_BUG;
1125  w = s->width;
1126  h = s->height;
1127  }
1128 
1129  /* parse transformations */
1130  s->nb_transforms = 0;
1131  s->reduced_width = s->width;
1132  used = 0;
1133  while (get_bits1(&s->gb)) {
1134  enum TransformType transform = get_bits(&s->gb, 2);
1135  if (used & (1 << transform)) {
1136  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1137  transform);
1139  goto free_and_return;
1140  }
1141  used |= (1 << transform);
1142  s->transforms[s->nb_transforms++] = transform;
1143  switch (transform) {
1144  case PREDICTOR_TRANSFORM:
1146  break;
1147  case COLOR_TRANSFORM:
1149  break;
1152  break;
1153  }
1154  if (ret < 0)
1155  goto free_and_return;
1156  }
1157 
1158  /* decode primary image */
1159  s->image[IMAGE_ROLE_ARGB].frame = p;
1160  if (is_alpha_chunk)
1161  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1163  if (ret < 0)
1164  goto free_and_return;
1165 
1166  /* apply transformations */
1167  for (i = s->nb_transforms - 1; i >= 0; i--) {
1168  switch (s->transforms[i]) {
1169  case PREDICTOR_TRANSFORM:
1171  break;
1172  case COLOR_TRANSFORM:
1174  break;
1175  case SUBTRACT_GREEN:
1177  break;
1180  break;
1181  }
1182  if (ret < 0)
1183  goto free_and_return;
1184  }
1185 
1186  *got_frame = 1;
1188  p->key_frame = 1;
1189  ret = data_size;
1190 
1191 free_and_return:
1192  for (i = 0; i < IMAGE_ROLE_NB; i++)
1193  image_ctx_free(&s->image[i]);
1194 
1195  return ret;
1196 }
1197 
1199 {
1200  int x, y, ls;
1201  uint8_t *dec;
1202 
1203  ls = frame->linesize[3];
1204 
1205  /* filter first row using horizontal filter */
1206  dec = frame->data[3] + 1;
1207  for (x = 1; x < frame->width; x++, dec++)
1208  *dec += *(dec - 1);
1209 
1210  /* filter first column using vertical filter */
1211  dec = frame->data[3] + ls;
1212  for (y = 1; y < frame->height; y++, dec += ls)
1213  *dec += *(dec - ls);
1214 
1215  /* filter the rest using the specified filter */
1216  switch (m) {
1218  for (y = 1; y < frame->height; y++) {
1219  dec = frame->data[3] + y * ls + 1;
1220  for (x = 1; x < frame->width; x++, dec++)
1221  *dec += *(dec - 1);
1222  }
1223  break;
1224  case ALPHA_FILTER_VERTICAL:
1225  for (y = 1; y < frame->height; y++) {
1226  dec = frame->data[3] + y * ls + 1;
1227  for (x = 1; x < frame->width; x++, dec++)
1228  *dec += *(dec - ls);
1229  }
1230  break;
1231  case ALPHA_FILTER_GRADIENT:
1232  for (y = 1; y < frame->height; y++) {
1233  dec = frame->data[3] + y * ls + 1;
1234  for (x = 1; x < frame->width; x++, dec++)
1235  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1236  }
1237  break;
1238  }
1239 }
1240 
1242  uint8_t *data_start,
1243  unsigned int data_size)
1244 {
1245  WebPContext *s = avctx->priv_data;
1246  int x, y, ret;
1247 
1248  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1249  GetByteContext gb;
1250 
1251  bytestream2_init(&gb, data_start, data_size);
1252  for (y = 0; y < s->height; y++)
1253  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1254  s->width);
1255  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1256  uint8_t *ap, *pp;
1257  int alpha_got_frame = 0;
1258 
1259  s->alpha_frame = av_frame_alloc();
1260  if (!s->alpha_frame)
1261  return AVERROR(ENOMEM);
1262 
1263  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1264  data_start, data_size, 1);
1265  if (ret < 0) {
1266  av_frame_free(&s->alpha_frame);
1267  return ret;
1268  }
1269  if (!alpha_got_frame) {
1270  av_frame_free(&s->alpha_frame);
1271  return AVERROR_INVALIDDATA;
1272  }
1273 
1274  /* copy green component of alpha image to alpha plane of primary image */
1275  for (y = 0; y < s->height; y++) {
1276  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1277  pp = p->data[3] + p->linesize[3] * y;
1278  for (x = 0; x < s->width; x++) {
1279  *pp = *ap;
1280  pp++;
1281  ap += 4;
1282  }
1283  }
1284  av_frame_free(&s->alpha_frame);
1285  }
1286 
1287  /* apply alpha filtering */
1288  if (s->alpha_filter)
1289  alpha_inverse_prediction(p, s->alpha_filter);
1290 
1291  return 0;
1292 }
1293 
1295  int *got_frame, uint8_t *data_start,
1296  unsigned int data_size)
1297 {
1298  WebPContext *s = avctx->priv_data;
1299  int ret;
1300 
1301  if (!s->initialized) {
1302  ff_vp8_decode_init(avctx);
1303  s->initialized = 1;
1304  s->v.actually_webp = 1;
1305  }
1306  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1307  s->lossless = 0;
1308 
1309  if (data_size > INT_MAX) {
1310  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1311  return AVERROR_PATCHWELCOME;
1312  }
1313 
1314  av_packet_unref(s->pkt);
1315  s->pkt->data = data_start;
1316  s->pkt->size = data_size;
1317 
1318  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1319  if (ret < 0)
1320  return ret;
1321 
1322  if (!*got_frame)
1323  return AVERROR_INVALIDDATA;
1324 
1325  update_canvas_size(avctx, avctx->width, avctx->height);
1326 
1327  if (s->has_alpha) {
1328  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1329  s->alpha_data_size);
1330  if (ret < 0)
1331  return ret;
1332  }
1333  return ret;
1334 }
1335 
1336 static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1337  AVPacket *avpkt)
1338 {
1339  AVFrame * const p = data;
1340  WebPContext *s = avctx->priv_data;
1341  GetByteContext gb;
1342  int ret;
1343  uint32_t chunk_type, chunk_size;
1344  int vp8x_flags = 0;
1345 
1346  s->avctx = avctx;
1347  s->width = 0;
1348  s->height = 0;
1349  *got_frame = 0;
1350  s->has_alpha = 0;
1351  s->has_exif = 0;
1352  s->has_iccp = 0;
1353  bytestream2_init(&gb, avpkt->data, avpkt->size);
1354 
1355  if (bytestream2_get_bytes_left(&gb) < 12)
1356  return AVERROR_INVALIDDATA;
1357 
1358  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1359  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1360  return AVERROR_INVALIDDATA;
1361  }
1362 
1363  chunk_size = bytestream2_get_le32(&gb);
1364  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1365  return AVERROR_INVALIDDATA;
1366 
1367  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1368  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1369  return AVERROR_INVALIDDATA;
1370  }
1371 
1372  while (bytestream2_get_bytes_left(&gb) > 8) {
1373  char chunk_str[5] = { 0 };
1374 
1375  chunk_type = bytestream2_get_le32(&gb);
1376  chunk_size = bytestream2_get_le32(&gb);
1377  if (chunk_size == UINT32_MAX)
1378  return AVERROR_INVALIDDATA;
1379  chunk_size += chunk_size & 1;
1380 
1381  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1382  /* we seem to be running out of data, but it could also be that the
1383  bitstream has trailing junk leading to bogus chunk_size. */
1384  break;
1385  }
1386 
1387  switch (chunk_type) {
1388  case MKTAG('V', 'P', '8', ' '):
1389  if (!*got_frame) {
1390  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1391  avpkt->data + bytestream2_tell(&gb),
1392  chunk_size);
1393  if (ret < 0)
1394  return ret;
1395  }
1396  bytestream2_skip(&gb, chunk_size);
1397  break;
1398  case MKTAG('V', 'P', '8', 'L'):
1399  if (!*got_frame) {
1400  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1401  avpkt->data + bytestream2_tell(&gb),
1402  chunk_size, 0);
1403  if (ret < 0)
1404  return ret;
1406  }
1407  bytestream2_skip(&gb, chunk_size);
1408  break;
1409  case MKTAG('V', 'P', '8', 'X'):
1410  if (s->width || s->height || *got_frame) {
1411  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1412  return AVERROR_INVALIDDATA;
1413  }
1414  vp8x_flags = bytestream2_get_byte(&gb);
1415  bytestream2_skip(&gb, 3);
1416  s->width = bytestream2_get_le24(&gb) + 1;
1417  s->height = bytestream2_get_le24(&gb) + 1;
1418  ret = av_image_check_size(s->width, s->height, 0, avctx);
1419  if (ret < 0)
1420  return ret;
1421  break;
1422  case MKTAG('A', 'L', 'P', 'H'): {
1423  int alpha_header, filter_m, compression;
1424 
1425  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1426  av_log(avctx, AV_LOG_WARNING,
1427  "ALPHA chunk present, but alpha bit not set in the "
1428  "VP8X header\n");
1429  }
1430  if (chunk_size == 0) {
1431  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  alpha_header = bytestream2_get_byte(&gb);
1435  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1436  s->alpha_data_size = chunk_size - 1;
1437  bytestream2_skip(&gb, s->alpha_data_size);
1438 
1439  filter_m = (alpha_header >> 2) & 0x03;
1440  compression = alpha_header & 0x03;
1441 
1442  if (compression > ALPHA_COMPRESSION_VP8L) {
1443  av_log(avctx, AV_LOG_VERBOSE,
1444  "skipping unsupported ALPHA chunk\n");
1445  } else {
1446  s->has_alpha = 1;
1447  s->alpha_compression = compression;
1448  s->alpha_filter = filter_m;
1449  }
1450 
1451  break;
1452  }
1453  case MKTAG('E', 'X', 'I', 'F'): {
1454  int le, ifd_offset, exif_offset = bytestream2_tell(&gb);
1455  AVDictionary *exif_metadata = NULL;
1456  GetByteContext exif_gb;
1457 
1458  if (s->has_exif) {
1459  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1460  goto exif_end;
1461  }
1462  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1463  av_log(avctx, AV_LOG_WARNING,
1464  "EXIF chunk present, but Exif bit not set in the "
1465  "VP8X header\n");
1466 
1467  s->has_exif = 1;
1468  bytestream2_init(&exif_gb, avpkt->data + exif_offset,
1469  avpkt->size - exif_offset);
1470  if (ff_tdecode_header(&exif_gb, &le, &ifd_offset) < 0) {
1471  av_log(avctx, AV_LOG_ERROR, "invalid TIFF header "
1472  "in Exif data\n");
1473  goto exif_end;
1474  }
1475 
1476  bytestream2_seek(&exif_gb, ifd_offset, SEEK_SET);
1477  if (ff_exif_decode_ifd(avctx, &exif_gb, le, 0, &exif_metadata) < 0) {
1478  av_log(avctx, AV_LOG_ERROR, "error decoding Exif data\n");
1479  goto exif_end;
1480  }
1481 
1482  av_dict_copy(&((AVFrame *) data)->metadata, exif_metadata, 0);
1483 
1484 exif_end:
1485  av_dict_free(&exif_metadata);
1486  bytestream2_skip(&gb, chunk_size);
1487  break;
1488  }
1489  case MKTAG('I', 'C', 'C', 'P'): {
1490  AVFrameSideData *sd;
1491 
1492  if (s->has_iccp) {
1493  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1494  bytestream2_skip(&gb, chunk_size);
1495  break;
1496  }
1497  if (!(vp8x_flags & VP8X_FLAG_ICC))
1498  av_log(avctx, AV_LOG_WARNING,
1499  "ICCP chunk present, but ICC Profile bit not set in the "
1500  "VP8X header\n");
1501 
1502  s->has_iccp = 1;
1504  if (!sd)
1505  return AVERROR(ENOMEM);
1506 
1507  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1508  break;
1509  }
1510  case MKTAG('A', 'N', 'I', 'M'):
1511  case MKTAG('A', 'N', 'M', 'F'):
1512  case MKTAG('X', 'M', 'P', ' '):
1513  AV_WL32(chunk_str, chunk_type);
1514  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1515  chunk_str);
1516  bytestream2_skip(&gb, chunk_size);
1517  break;
1518  default:
1519  AV_WL32(chunk_str, chunk_type);
1520  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1521  chunk_str);
1522  bytestream2_skip(&gb, chunk_size);
1523  break;
1524  }
1525  }
1526 
1527  if (!*got_frame) {
1528  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1529  return AVERROR_INVALIDDATA;
1530  }
1531 
1532  return avpkt->size;
1533 }
1534 
1536 {
1537  WebPContext *s = avctx->priv_data;
1538 
1539  s->pkt = av_packet_alloc();
1540  if (!s->pkt)
1541  return AVERROR(ENOMEM);
1542 
1543  return 0;
1544 }
1545 
1547 {
1548  WebPContext *s = avctx->priv_data;
1549 
1550  av_packet_free(&s->pkt);
1551 
1552  if (s->initialized)
1553  return ff_vp8_decode_free(avctx);
1554 
1555  return 0;
1556 }
1557 
1559  .name = "webp",
1560  .long_name = NULL_IF_CONFIG_SMALL("WebP image"),
1561  .type = AVMEDIA_TYPE_VIDEO,
1562  .id = AV_CODEC_ID_WEBP,
1563  .priv_data_size = sizeof(WebPContext),
1566  .close = webp_decode_close,
1567  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1568  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1569 };
WebPContext::width
int width
Definition: webp.c:205
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:194
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:417
AVCodec
AVCodec.
Definition: codec.h:202
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2824
HuffReader::vlc
VLC vlc
Definition: webp.c:171
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:864
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:67
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1241
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1294
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:246
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1085
INIT_VLC_OUTPUT_LE
#define INIT_VLC_OUTPUT_LE
Definition: vlc.h:93
color
Definition: vf_paletteuse.c:599
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:123
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:105
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:174
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
GetByteContext
Definition: bytestream.h:33
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:233
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:103
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:118
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:783
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:769
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:310
AVFrame::width
int width
Definition: frame.h:380
w
uint8_t w
Definition: llviddspenc.c:38
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:221
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:130
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:114
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:38
b
#define b
Definition: input.c:40
data
const char data[16]
Definition: mxf.c:143
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:113
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:182
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:476
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:126
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:798
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVDictionary
Definition: dict.c:30
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
thread.h
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, int alphabet_size)
Definition: webp.c:252
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:210
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:121
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:125
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:329
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
HuffReader::simple
int simple
Definition: webp.c:172
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:122
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:199
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:840
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1336
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:820
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:196
finish
static void finish(void)
Definition: movenc.c:342
U
#define U(x)
Definition: vp56_arith.h:37
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:99
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:209
GetBitContext
Definition: get_bits.h:62
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1070
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:400
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:202
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:889
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:112
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:56
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:776
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:953
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:550
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:135
WebPContext::has_exif
int has_exif
Definition: webp.c:203
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:329
WebPContext::has_alpha
int has_alpha
Definition: webp.c:198
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
PredictionMode
PredictionMode
Definition: webp.c:117
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
ImageContext::frame
AVFrame * frame
Definition: webp.c:179
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1823
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:900
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
TransformType
TransformType
Definition: webp.c:110
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:127
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:63
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:76
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:544
bits
uint8_t bits
Definition: vp3data.h:141
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:66
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:850
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:62
ImageContext
Definition: webp.c:177
get_bits.h
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:404
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:181
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:218
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:188
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:124
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1546
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:183
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:983
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:173
WebPContext::height
int height
Definition: webp.c:206
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:104
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:873
WebPContext::alpha_data
uint8_t * alpha_data
Definition: webp.c:201
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:139
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:893
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:180
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:492
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1535
WebPContext::v
VP8Context v
Definition: webp.c:192
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:70
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:64
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:158
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2810
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
ff_vp8_decode_init
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2894
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1198
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:165
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:405
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:755
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:167
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:58
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AlphaCompression
AlphaCompression
Definition: webp.c:98
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:111
ImageContext::size_reduction
int size_reduction
Definition: webp.c:187
size
int size
Definition: twinvq_data.h:10344
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:218
ImageContext::role
enum ImageRole role
Definition: webp.c:178
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:430
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:959
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:57
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:116
pt
int pt
Definition: rtp.c:35
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HuffReader
Definition: webp.c:170
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:460
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:128
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:107
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:214
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:790
WebPContext::lossless
int lossless
Definition: webp.c:207
WebPContext::reduced_width
int reduced_width
Definition: webp.c:213
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:65
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1822
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:195
AlphaFilter
AlphaFilter
Definition: webp.c:103
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:129
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:80
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
WebPContext::gb
GetBitContext gb
Definition: webp.c:193
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:922
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HuffmanIndex
HuffmanIndex
Definition: webp.c:134
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:222
len
int len
Definition: vorbis_enc_data.h:426
exif.h
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:810
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:241
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:880
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:215
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:800
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:383
ThreadFrame
Definition: thread.h:34
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:137
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:154
VLC
Definition: vlc.h:26
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:224
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
WebPContext::initialized
int initialized
Definition: webp.c:197
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:998
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:216
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:68
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:106
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:424
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:119
WebPContext
Definition: webp.c:191
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AVPacket
This structure stores compressed data.
Definition: packet.h:350
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
VP8Context
Definition: vp8.h:148
d
d
Definition: ffmpeg_filter.c:156
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
ImageRole
ImageRole
Definition: webp.c:148
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:233
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:353
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:314
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_webp_decoder
const AVCodec ff_webp_decoder
Definition: webp.c:1558
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:138
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:204
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:527
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:830
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:100
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:762
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:120
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:200
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:136
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:150
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:131
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:162