/* FFmpeg — libavcodec/webp.c (doxygen source listing; some lines omitted by extraction) */
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 
45 #define BITSTREAM_READER_LE
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "codec_internal.h"
49 #include "exif.h"
50 #include "get_bits.h"
51 #include "internal.h"
52 #include "thread.h"
53 #include "tiff_common.h"
54 #include "vp8.h"
55 
/* feature-flag bits carried in the VP8X extended-header chunk */
#define VP8X_FLAG_ANIMATION 0x02
#define VP8X_FLAG_XMP_METADATA 0x04
#define VP8X_FLAG_EXIF_METADATA 0x08
#define VP8X_FLAG_ALPHA 0x10
#define VP8X_FLAG_ICC 0x20

/* limits and alphabet sizes used by the VP8L (lossless) decoder */
#define MAX_PALETTE_SIZE 256
#define MAX_CACHE_BITS 11 /* max log2 color-cache size (see color_cache_bits check) */
#define NUM_CODE_LENGTH_CODES 19
#define HUFFMAN_CODES_PER_META_CODE 5 /* green, red, blue, alpha, distance */
#define NUM_LITERAL_CODES 256
#define NUM_LENGTH_CODES 24
#define NUM_DISTANCE_CODES 40
#define NUM_SHORT_DISTANCES 120
#define MAX_HUFFMAN_CODE_LENGTH 15
71 
72 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
76 };
77 
79  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
80 };
81 
/* (dx, dy) neighbour offsets for the 120 "short distance" LZ77 codes;
 * a decoded distance in [1, NUM_SHORT_DISTANCES] indexes this table and is
 * remapped to a linear pixel distance (see decode_entropy_coded_image). */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
    { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
    { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
    { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
    { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
    { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
    { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
    { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
    { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
    { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
    { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
    { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
    { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
    { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
    { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
};
99 
103 };
104 
110 };
111 
117 };
118 
134 };
135 
142 };
143 
144 /* The structure of WebP lossless is an optional series of transformation data,
145  * followed by the primary image. The primary image also optionally contains
146  * an entropy group mapping if there are multiple entropy groups. There is a
147  * basic image type called an "entropy coded image" that is used for all of
148  * these. The type of each entropy coded image is referred to by the
149  * specification as its role. */
150 enum ImageRole {
151  /* Primary Image: Stores the actual pixels of the image. */
153 
154  /* Entropy Image: Defines which Huffman group to use for different areas of
155  * the primary image. */
157 
158  /* Predictors: Defines which predictor type to use for different areas of
159  * the primary image. */
161 
162  /* Color Transform Data: Defines the color transformation for different
163  * areas of the primary image. */
165 
166  /* Color Index: Stored as an image of height == 1. */
168 
170 };
171 
/* Huffman code reader: either a "simple" code of 1-2 symbols stored
 * directly, or a full canonical code decoded through a VLC table. */
typedef struct HuffReader {
    VLC vlc;                            /* Huffman decoder context */
    int simple;                         /* whether to use simple mode */
    int nb_symbols;                     /* number of coded symbols */
    uint16_t simple_symbols[2];         /* symbols for simple mode */
} HuffReader;
178 
179 typedef struct ImageContext {
180  enum ImageRole role; /* role of this image */
181  AVFrame *frame; /* AVFrame for data */
182  int color_cache_bits; /* color cache size, log2 */
183  uint32_t *color_cache; /* color cache data */
184  int nb_huffman_groups; /* number of huffman groups */
185  HuffReader *huffman_groups; /* reader for each huffman group */
186  /* relative size compared to primary image, log2.
187  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
188  * number of pixels per byte in the primary image (pixel packing) */
191 } ImageContext;
192 
193 typedef struct WebPContext {
194  VP8Context v; /* VP8 Context used for lossy decoding */
195  GetBitContext gb; /* bitstream reader for main image chunk */
196  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
197  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
198  AVCodecContext *avctx; /* parent AVCodecContext */
199  int initialized; /* set once the VP8 context is initialized */
200  int has_alpha; /* has a separate alpha chunk */
201  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
202  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
203  const uint8_t *alpha_data; /* alpha chunk data */
204  int alpha_data_size; /* alpha chunk data size */
205  int has_exif; /* set after an EXIF chunk has been processed */
206  int has_iccp; /* set after an ICCP chunk has been processed */
207  int width; /* image width */
208  int height; /* image height */
209  int lossless; /* indicates lossless or lossy */
210 
211  int nb_transforms; /* number of transforms */
212  enum TransformType transforms[4]; /* transformations used in the image, in order */
213  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
214  * before pixels are unpacked, or same as width otherwise. */
216  int nb_huffman_groups; /* number of huffman groups in the primary image */
217  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
218 } WebPContext;
219 
/* Address of the 4-byte ARGB pixel at (x, y) in an AV_PIX_FMT_ARGB frame.
 * All macro arguments are fully parenthesized (the original left `frame`
 * bare in the linesize access and `c` bare in GET_PIXEL_COMP, which breaks
 * for non-trivial argument expressions). */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* Value of component c (0=A, 1=R, 2=G, 3=B) of the pixel at (x, y). */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
225 
227 {
228  int i, j;
229 
230  av_free(img->color_cache);
231  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
232  av_frame_free(&img->frame);
233  if (img->huffman_groups) {
234  for (i = 0; i < img->nb_huffman_groups; i++) {
235  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
236  ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
237  }
238  av_free(img->huffman_groups);
239  }
240  memset(img, 0, sizeof(*img));
241 }
242 
244 {
245  if (r->simple) {
246  if (r->nb_symbols == 1)
247  return r->simple_symbols[0];
248  else
249  return r->simple_symbols[get_bits1(gb)];
250  } else
251  return get_vlc2(gb, r->vlc.table, 8, 2);
252 }
253 
254 static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
255  int alphabet_size)
256 {
257  int len = 0, sym, code = 0, ret;
258  int max_code_length = 0;
259  uint16_t *codes;
260 
261  /* special-case 1 symbol since the vlc reader cannot handle it */
262  for (sym = 0; sym < alphabet_size; sym++) {
263  if (code_lengths[sym] > 0) {
264  len++;
265  code = sym;
266  if (len > 1)
267  break;
268  }
269  }
270  if (len == 1) {
271  r->nb_symbols = 1;
272  r->simple_symbols[0] = code;
273  r->simple = 1;
274  return 0;
275  }
276 
277  for (sym = 0; sym < alphabet_size; sym++)
278  max_code_length = FFMAX(max_code_length, code_lengths[sym]);
279 
280  if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
281  return AVERROR(EINVAL);
282 
283  codes = av_malloc_array(alphabet_size, sizeof(*codes));
284  if (!codes)
285  return AVERROR(ENOMEM);
286 
287  code = 0;
288  r->nb_symbols = 0;
289  for (len = 1; len <= max_code_length; len++) {
290  for (sym = 0; sym < alphabet_size; sym++) {
291  if (code_lengths[sym] != len)
292  continue;
293  codes[sym] = code++;
294  r->nb_symbols++;
295  }
296  code <<= 1;
297  }
298  if (!r->nb_symbols) {
299  av_free(codes);
300  return AVERROR_INVALIDDATA;
301  }
302 
303  ret = init_vlc(&r->vlc, 8, alphabet_size,
304  code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
305  codes, sizeof(*codes), sizeof(*codes), INIT_VLC_OUTPUT_LE);
306  if (ret < 0) {
307  av_free(codes);
308  return ret;
309  }
310  r->simple = 0;
311 
312  av_free(codes);
313  return 0;
314 }
315 
317 {
318  hc->nb_symbols = get_bits1(&s->gb) + 1;
319 
320  if (get_bits1(&s->gb))
321  hc->simple_symbols[0] = get_bits(&s->gb, 8);
322  else
323  hc->simple_symbols[0] = get_bits1(&s->gb);
324 
325  if (hc->nb_symbols == 2)
326  hc->simple_symbols[1] = get_bits(&s->gb, 8);
327 
328  hc->simple = 1;
329 }
330 
332  int alphabet_size)
333 {
334  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
335  uint8_t *code_lengths;
336  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
337  int i, symbol, max_symbol, prev_code_len, ret;
338  int num_codes = 4 + get_bits(&s->gb, 4);
339 
340  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
341 
342  for (i = 0; i < num_codes; i++)
343  code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
344 
345  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
347  if (ret < 0)
348  return ret;
349 
350  code_lengths = av_mallocz(alphabet_size);
351  if (!code_lengths) {
352  ret = AVERROR(ENOMEM);
353  goto finish;
354  }
355 
356  if (get_bits1(&s->gb)) {
357  int bits = 2 + 2 * get_bits(&s->gb, 3);
358  max_symbol = 2 + get_bits(&s->gb, bits);
359  if (max_symbol > alphabet_size) {
360  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
361  max_symbol, alphabet_size);
363  goto finish;
364  }
365  } else {
366  max_symbol = alphabet_size;
367  }
368 
369  prev_code_len = 8;
370  symbol = 0;
371  while (symbol < alphabet_size) {
372  int code_len;
373 
374  if (!max_symbol--)
375  break;
376  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
377  if (code_len < 16) {
378  /* Code length code [0..15] indicates literal code lengths. */
379  code_lengths[symbol++] = code_len;
380  if (code_len)
381  prev_code_len = code_len;
382  } else {
383  int repeat = 0, length = 0;
384  switch (code_len) {
385  case 16:
386  /* Code 16 repeats the previous non-zero value [3..6] times,
387  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
388  * non-zero value has been emitted, a value of 8 is repeated. */
389  repeat = 3 + get_bits(&s->gb, 2);
390  length = prev_code_len;
391  break;
392  case 17:
393  /* Code 17 emits a streak of zeros [3..10], i.e.,
394  * 3 + ReadBits(3) times. */
395  repeat = 3 + get_bits(&s->gb, 3);
396  break;
397  case 18:
398  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
399  * 11 + ReadBits(7) times. */
400  repeat = 11 + get_bits(&s->gb, 7);
401  break;
402  }
403  if (symbol + repeat > alphabet_size) {
404  av_log(s->avctx, AV_LOG_ERROR,
405  "invalid symbol %d + repeat %d > alphabet size %d\n",
406  symbol, repeat, alphabet_size);
408  goto finish;
409  }
410  while (repeat-- > 0)
411  code_lengths[symbol++] = length;
412  }
413  }
414 
415  ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);
416 
417 finish:
418  ff_free_vlc(&code_len_hc.vlc);
419  av_free(code_lengths);
420  return ret;
421 }
422 
423 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
424  int w, int h);
425 
/* Read a meta-block size exponent (3 bits, giving block_bits in [2..9]) and
 * compute how many blocks cover a w x h image.
 * NOTE: expands in place and assigns the CALLER's local variables
 * `block_bits`, `blocks_w`, `blocks_h`, and reads `s->gb`. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
431 
433 {
434  ImageContext *img;
435  int ret, block_bits, blocks_w, blocks_h, x, y, max;
436 
437  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
438 
439  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
440  if (ret < 0)
441  return ret;
442 
443  img = &s->image[IMAGE_ROLE_ENTROPY];
444  img->size_reduction = block_bits;
445 
446  /* the number of huffman groups is determined by the maximum group number
447  * coded in the entropy image */
448  max = 0;
449  for (y = 0; y < img->frame->height; y++) {
450  for (x = 0; x < img->frame->width; x++) {
451  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
452  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
453  int p = p0 << 8 | p1;
454  max = FFMAX(max, p);
455  }
456  }
457  s->nb_huffman_groups = max + 1;
458 
459  return 0;
460 }
461 
463 {
464  int block_bits, blocks_w, blocks_h, ret;
465 
466  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
467 
469  blocks_h);
470  if (ret < 0)
471  return ret;
472 
473  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
474 
475  return 0;
476 }
477 
479 {
480  int block_bits, blocks_w, blocks_h, ret;
481 
482  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
483 
485  blocks_h);
486  if (ret < 0)
487  return ret;
488 
489  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
490 
491  return 0;
492 }
493 
495 {
496  ImageContext *img;
497  int width_bits, index_size, ret, x;
498  uint8_t *ct;
499 
500  index_size = get_bits(&s->gb, 8) + 1;
501 
502  if (index_size <= 2)
503  width_bits = 3;
504  else if (index_size <= 4)
505  width_bits = 2;
506  else if (index_size <= 16)
507  width_bits = 1;
508  else
509  width_bits = 0;
510 
512  index_size, 1);
513  if (ret < 0)
514  return ret;
515 
516  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
517  img->size_reduction = width_bits;
518  if (width_bits > 0)
519  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
520 
521  /* color index values are delta-coded */
522  ct = img->frame->data[0] + 4;
523  for (x = 4; x < img->frame->width * 4; x++, ct++)
524  ct[0] += ct[-4];
525 
526  return 0;
527 }
528 
530  int x, int y)
531 {
532  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
533  int group = 0;
534 
535  if (gimg->size_reduction > 0) {
536  int group_x = x >> gimg->size_reduction;
537  int group_y = y >> gimg->size_reduction;
538  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
539  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
540  group = g0 << 8 | g1;
541  }
542 
543  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
544 }
545 
547 {
548  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
549  img->color_cache[cache_idx] = c;
550 }
551 
553  int w, int h)
554 {
555  ImageContext *img;
556  HuffReader *hg;
557  int i, j, ret, x, y, width;
558 
559  img = &s->image[role];
560  img->role = role;
561 
562  if (!img->frame) {
563  img->frame = av_frame_alloc();
564  if (!img->frame)
565  return AVERROR(ENOMEM);
566  }
567 
568  img->frame->format = AV_PIX_FMT_ARGB;
569  img->frame->width = w;
570  img->frame->height = h;
571 
572  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
573  ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
574  } else
575  ret = av_frame_get_buffer(img->frame, 1);
576  if (ret < 0)
577  return ret;
578 
579  if (get_bits1(&s->gb)) {
580  img->color_cache_bits = get_bits(&s->gb, 4);
581  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
582  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
583  img->color_cache_bits);
584  return AVERROR_INVALIDDATA;
585  }
586  img->color_cache = av_calloc(1 << img->color_cache_bits,
587  sizeof(*img->color_cache));
588  if (!img->color_cache)
589  return AVERROR(ENOMEM);
590  } else {
591  img->color_cache_bits = 0;
592  }
593 
594  img->nb_huffman_groups = 1;
595  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
597  if (ret < 0)
598  return ret;
599  img->nb_huffman_groups = s->nb_huffman_groups;
600  }
601  img->huffman_groups = av_calloc(img->nb_huffman_groups,
603  sizeof(*img->huffman_groups));
604  if (!img->huffman_groups)
605  return AVERROR(ENOMEM);
606 
607  for (i = 0; i < img->nb_huffman_groups; i++) {
608  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
609  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
610  int alphabet_size = alphabet_sizes[j];
611  if (!j && img->color_cache_bits > 0)
612  alphabet_size += 1 << img->color_cache_bits;
613 
614  if (get_bits1(&s->gb)) {
615  read_huffman_code_simple(s, &hg[j]);
616  } else {
617  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
618  if (ret < 0)
619  return ret;
620  }
621  }
622  }
623 
624  width = img->frame->width;
625  if (role == IMAGE_ROLE_ARGB)
626  width = s->reduced_width;
627 
628  x = 0; y = 0;
629  while (y < img->frame->height) {
630  int v;
631 
632  if (get_bits_left(&s->gb) < 0)
633  return AVERROR_INVALIDDATA;
634 
635  hg = get_huffman_group(s, img, x, y);
636  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
637  if (v < NUM_LITERAL_CODES) {
638  /* literal pixel values */
639  uint8_t *p = GET_PIXEL(img->frame, x, y);
640  p[2] = v;
641  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
642  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
643  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
644  if (img->color_cache_bits)
646  x++;
647  if (x == width) {
648  x = 0;
649  y++;
650  }
651  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
652  /* LZ77 backwards mapping */
653  int prefix_code, length, distance, ref_x, ref_y;
654 
655  /* parse length and distance */
656  prefix_code = v - NUM_LITERAL_CODES;
657  if (prefix_code < 4) {
658  length = prefix_code + 1;
659  } else {
660  int extra_bits = (prefix_code - 2) >> 1;
661  int offset = 2 + (prefix_code & 1) << extra_bits;
662  length = offset + get_bits(&s->gb, extra_bits) + 1;
663  }
664  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
665  if (prefix_code > 39U) {
666  av_log(s->avctx, AV_LOG_ERROR,
667  "distance prefix code too large: %d\n", prefix_code);
668  return AVERROR_INVALIDDATA;
669  }
670  if (prefix_code < 4) {
671  distance = prefix_code + 1;
672  } else {
673  int extra_bits = prefix_code - 2 >> 1;
674  int offset = 2 + (prefix_code & 1) << extra_bits;
675  distance = offset + get_bits(&s->gb, extra_bits) + 1;
676  }
677 
678  /* find reference location */
679  if (distance <= NUM_SHORT_DISTANCES) {
680  int xi = lz77_distance_offsets[distance - 1][0];
681  int yi = lz77_distance_offsets[distance - 1][1];
682  distance = FFMAX(1, xi + yi * width);
683  } else {
685  }
686  ref_x = x;
687  ref_y = y;
688  if (distance <= x) {
689  ref_x -= distance;
690  distance = 0;
691  } else {
692  ref_x = 0;
693  distance -= x;
694  }
695  while (distance >= width) {
696  ref_y--;
697  distance -= width;
698  }
699  if (distance > 0) {
700  ref_x = width - distance;
701  ref_y--;
702  }
703  ref_x = FFMAX(0, ref_x);
704  ref_y = FFMAX(0, ref_y);
705 
706  /* copy pixels
707  * source and dest regions can overlap and wrap lines, so just
708  * copy per-pixel */
709  for (i = 0; i < length; i++) {
710  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
711  uint8_t *p = GET_PIXEL(img->frame, x, y);
712 
713  AV_COPY32(p, p_ref);
714  if (img->color_cache_bits)
716  x++;
717  ref_x++;
718  if (x == width) {
719  x = 0;
720  y++;
721  }
722  if (ref_x == width) {
723  ref_x = 0;
724  ref_y++;
725  }
726  if (y == img->frame->height || ref_y == img->frame->height)
727  break;
728  }
729  } else {
730  /* read from color cache */
731  uint8_t *p = GET_PIXEL(img->frame, x, y);
732  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
733 
734  if (!img->color_cache_bits) {
735  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
736  return AVERROR_INVALIDDATA;
737  }
738  if (cache_idx >= 1 << img->color_cache_bits) {
739  av_log(s->avctx, AV_LOG_ERROR,
740  "color cache index out-of-bounds\n");
741  return AVERROR_INVALIDDATA;
742  }
743  AV_WB32(p, img->color_cache[cache_idx]);
744  x++;
745  if (x == width) {
746  x = 0;
747  y++;
748  }
749  }
750  }
751 
752  return 0;
753 }
754 
/* PRED_MODE_BLACK: predict opaque black (alpha=0xFF, RGB=0), written
 * big-endian so byte order matches the ARGB pixel layout. */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_WB32(p, 0xFF000000);
}
761 
/* PRED_MODE_L: predict from the left neighbour. */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_l);
}
768 
/* PRED_MODE_T: predict from the top neighbour. */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_t);
}
775 
/* PRED_MODE_TR: predict from the top-right neighbour. */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tr);
}
782 
/* PRED_MODE_TL: predict from the top-left neighbour. */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tl);
}
789 
790 /* PRED_MODE_AVG_T_AVG_L_TR */
791 static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
792  const uint8_t *p_t, const uint8_t *p_tr)
793 {
794  p[0] = p_t[0] + (p_l[0] + p_tr[0] >> 1) >> 1;
795  p[1] = p_t[1] + (p_l[1] + p_tr[1] >> 1) >> 1;
796  p[2] = p_t[2] + (p_l[2] + p_tr[2] >> 1) >> 1;
797  p[3] = p_t[3] + (p_l[3] + p_tr[3] >> 1) >> 1;
798 }
799 
800 /* PRED_MODE_AVG_L_TL */
801 static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
802  const uint8_t *p_t, const uint8_t *p_tr)
803 {
804  p[0] = p_l[0] + p_tl[0] >> 1;
805  p[1] = p_l[1] + p_tl[1] >> 1;
806  p[2] = p_l[2] + p_tl[2] >> 1;
807  p[3] = p_l[3] + p_tl[3] >> 1;
808 }
809 
810 /* PRED_MODE_AVG_L_T */
811 static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
812  const uint8_t *p_t, const uint8_t *p_tr)
813 {
814  p[0] = p_l[0] + p_t[0] >> 1;
815  p[1] = p_l[1] + p_t[1] >> 1;
816  p[2] = p_l[2] + p_t[2] >> 1;
817  p[3] = p_l[3] + p_t[3] >> 1;
818 }
819 
820 /* PRED_MODE_AVG_TL_T */
821 static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
822  const uint8_t *p_t, const uint8_t *p_tr)
823 {
824  p[0] = p_tl[0] + p_t[0] >> 1;
825  p[1] = p_tl[1] + p_t[1] >> 1;
826  p[2] = p_tl[2] + p_t[2] >> 1;
827  p[3] = p_tl[3] + p_t[3] >> 1;
828 }
829 
830 /* PRED_MODE_AVG_T_TR */
831 static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
832  const uint8_t *p_t, const uint8_t *p_tr)
833 {
834  p[0] = p_t[0] + p_tr[0] >> 1;
835  p[1] = p_t[1] + p_tr[1] >> 1;
836  p[2] = p_t[2] + p_tr[2] >> 1;
837  p[3] = p_t[3] + p_tr[3] >> 1;
838 }
839 
840 /* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
841 static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
842  const uint8_t *p_t, const uint8_t *p_tr)
843 {
844  p[0] = (p_l[0] + p_tl[0] >> 1) + (p_t[0] + p_tr[0] >> 1) >> 1;
845  p[1] = (p_l[1] + p_tl[1] >> 1) + (p_t[1] + p_tr[1] >> 1) >> 1;
846  p[2] = (p_l[2] + p_tl[2] >> 1) + (p_t[2] + p_tr[2] >> 1) >> 1;
847  p[3] = (p_l[3] + p_tl[3] >> 1) + (p_t[3] + p_tr[3] >> 1) >> 1;
848 }
849 
/* PRED_MODE_SELECT: copy whichever of the top or left pixels is closer to
 * the top-left pixel, measured as the sum of per-channel absolute
 * differences; ties (diff == 0) pick the top pixel. */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    /* positive diff means the top pixel deviates more from top-left than
     * the left pixel does, so the left pixel is chosen */
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    if (diff <= 0)
        AV_COPY32(p, p_t);
    else
        AV_COPY32(p, p_l);
}
863 
/* PRED_MODE_ADD_SUBTRACT_FULL: gradient predictor, per channel
 * clip(left + top - top_left) clamped to [0, 255]. */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
    p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
    p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
    p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
}
873 
/* Helper for PRED_MODE_ADD_SUBTRACT_HALF: d = avg(a, b) (note that
 * "a + b >> 1" parses as "(a + b) >> 1"), then push d half-way away from c
 * and clamp to [0, 255]. (d - c) / 2 truncates toward zero, unlike >> 1. */
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
{
    int d = a + b >> 1;
    return av_clip_uint8(d + (d - c) / 2);
}
879 
/* PRED_MODE_ADD_SUBTRACT_HALF: per channel, average left and top, then
 * move the result half-way away from top-left (see clamp_add_subtract_half). */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
    p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
    p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
    p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
}
889 
/* common signature of the inverse spatial predictors above: write the
 * 4-byte prediction into p, given left/top-left/top/top-right neighbours */
typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
                                 const uint8_t *p_tl, const uint8_t *p_t,
                                 const uint8_t *p_tr);
893 
894 static const inv_predict_func inverse_predict[14] = {
899 };
900 
/* Undo predictor mode m for the pixel at (x, y): compute the prediction
 * from already-decoded neighbours and add it (mod 256 per byte) to the
 * coded residual in place. The caller guarantees valid neighbours
 * (x == 0 and y == 0 are remapped to other modes before this is called)
 * and m <= 13 so the table lookup is in bounds. */
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
{
    uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
    uint8_t p[4];

    dec  = GET_PIXEL(frame, x,     y);
    p_l  = GET_PIXEL(frame, x - 1, y);
    p_tl = GET_PIXEL(frame, x - 1, y - 1);
    p_t  = GET_PIXEL(frame, x,     y - 1);
    /* at the right edge the top-right neighbour falls outside the row;
     * use column 0 of the current row instead */
    if (x == frame->width - 1)
        p_tr = GET_PIXEL(frame, 0, y);
    else
        p_tr = GET_PIXEL(frame, x + 1, y - 1);

    inverse_predict[m](p, p_l, p_tl, p_t, p_tr);

    dec[0] += p[0];
    dec[1] += p[1];
    dec[2] += p[2];
    dec[3] += p[3];
}
922 
924 {
925  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
926  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
927  int x, y;
928 
929  for (y = 0; y < img->frame->height; y++) {
930  for (x = 0; x < s->reduced_width; x++) {
931  int tx = x >> pimg->size_reduction;
932  int ty = y >> pimg->size_reduction;
933  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
934 
935  if (x == 0) {
936  if (y == 0)
937  m = PRED_MODE_BLACK;
938  else
939  m = PRED_MODE_T;
940  } else if (y == 0)
941  m = PRED_MODE_L;
942 
943  if (m > 13) {
944  av_log(s->avctx, AV_LOG_ERROR,
945  "invalid predictor mode: %d\n", m);
946  return AVERROR_INVALIDDATA;
947  }
948  inverse_prediction(img->frame, m, x, y);
949  }
950  }
951  return 0;
952 }
953 
/* Cross-channel color-transform delta: both bytes are reinterpreted as
 * signed 8-bit values, multiplied, and the product is scaled by >> 5
 * (fixed-point 3.5), then truncated to a byte by the caller's addition. */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
959 
961 {
962  ImageContext *img, *cimg;
963  int x, y, cx, cy;
964  uint8_t *p, *cp;
965 
966  img = &s->image[IMAGE_ROLE_ARGB];
967  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
968 
969  for (y = 0; y < img->frame->height; y++) {
970  for (x = 0; x < s->reduced_width; x++) {
971  cx = x >> cimg->size_reduction;
972  cy = y >> cimg->size_reduction;
973  cp = GET_PIXEL(cimg->frame, cx, cy);
974  p = GET_PIXEL(img->frame, x, y);
975 
976  p[1] += color_transform_delta(cp[3], p[2]);
977  p[3] += color_transform_delta(cp[2], p[2]) +
978  color_transform_delta(cp[1], p[1]);
979  }
980  }
981  return 0;
982 }
983 
985 {
986  int x, y;
987  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
988 
989  for (y = 0; y < img->frame->height; y++) {
990  for (x = 0; x < s->reduced_width; x++) {
991  uint8_t *p = GET_PIXEL(img->frame, x, y);
992  p[1] += p[2];
993  p[3] += p[2];
994  }
995  }
996  return 0;
997 }
998 
1000 {
1001  ImageContext *img;
1002  ImageContext *pal;
1003  int i, x, y;
1004  uint8_t *p;
1005 
1006  img = &s->image[IMAGE_ROLE_ARGB];
1007  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1008 
1009  if (pal->size_reduction > 0) { // undo pixel packing
1010  GetBitContext gb_g;
1011  uint8_t *line;
1012  int pixel_bits = 8 >> pal->size_reduction;
1013 
1014  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1015  if (!line)
1016  return AVERROR(ENOMEM);
1017 
1018  for (y = 0; y < img->frame->height; y++) {
1019  p = GET_PIXEL(img->frame, 0, y);
1020  memcpy(line, p, img->frame->linesize[0]);
1021  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1022  skip_bits(&gb_g, 16);
1023  i = 0;
1024  for (x = 0; x < img->frame->width; x++) {
1025  p = GET_PIXEL(img->frame, x, y);
1026  p[2] = get_bits(&gb_g, pixel_bits);
1027  i++;
1028  if (i == 1 << pal->size_reduction) {
1029  skip_bits(&gb_g, 24);
1030  i = 0;
1031  }
1032  }
1033  }
1034  av_free(line);
1035  s->reduced_width = s->width; // we are back to full size
1036  }
1037 
1038  // switch to local palette if it's worth initializing it
1039  if (img->frame->height * img->frame->width > 300) {
1040  uint8_t palette[256 * 4];
1041  const int size = pal->frame->width * 4;
1042  av_assert0(size <= 1024U);
1043  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1044  // set extra entries to transparent black
1045  memset(palette + size, 0, 256 * 4 - size);
1046  for (y = 0; y < img->frame->height; y++) {
1047  for (x = 0; x < img->frame->width; x++) {
1048  p = GET_PIXEL(img->frame, x, y);
1049  i = p[2];
1050  AV_COPY32(p, &palette[i * 4]);
1051  }
1052  }
1053  } else {
1054  for (y = 0; y < img->frame->height; y++) {
1055  for (x = 0; x < img->frame->width; x++) {
1056  p = GET_PIXEL(img->frame, x, y);
1057  i = p[2];
1058  if (i >= pal->frame->width) {
1059  AV_WB32(p, 0x00000000);
1060  } else {
1061  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1062  AV_COPY32(p, pi);
1063  }
1064  }
1065  }
1066  }
1067 
1068  return 0;
1069 }
1070 
1071 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1072 {
1073  WebPContext *s = avctx->priv_data;
1074  if (s->width && s->width != w) {
1075  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1076  s->width, w);
1077  }
1078  s->width = w;
1079  if (s->height && s->height != h) {
1080  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1081  s->height, h);
1082  }
1083  s->height = h;
1084 }
1085 
/* Decode a VP8L (lossless) bitstream into frame p.
 * NOTE(review): this Doxygen listing dropped several source lines. Per the
 * symbol index, the opening signature (webp.c:1086) is:
 *   static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
 * and the continuation of that signature follows below. Each interior gap in
 * the embedded line numbering is flagged where it occurs — confirm every
 * flagged line against the original webp.c before relying on this listing. */
1087  int *got_frame, const uint8_t *data_start,
1088  unsigned int data_size, int is_alpha_chunk)
1089 {
1090  WebPContext *s = avctx->priv_data;
1091  int w, h, ret, i, used;
1092 
/* For the primary image (not an ALPH chunk payload): mark the stream
 * lossless and output packed ARGB. */
1093  if (!is_alpha_chunk) {
1094  s->lossless = 1;
1095  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1096  }
1097 
1098  ret = init_get_bits8(&s->gb, data_start, data_size);
1099  if (ret < 0)
1100  return ret;
1101 
1102  if (!is_alpha_chunk) {
/* 0x2F is the one-byte VP8L signature. */
1103  if (get_bits(&s->gb, 8) != 0x2F) {
1104  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1105  return AVERROR_INVALIDDATA;
1106  }
1107 
/* Dimensions are stored minus one in 14 bits each. */
1108  w = get_bits(&s->gb, 14) + 1;
1109  h = get_bits(&s->gb, 14) + 1;
1110 
1111  update_canvas_size(avctx, w, h);
1112 
1113  ret = ff_set_dimensions(avctx, s->width, s->height);
1114  if (ret < 0)
1115  return ret;
1116 
1117  s->has_alpha = get_bits1(&s->gb);
1118 
/* The 3-bit version field must be zero. */
1119  if (get_bits(&s->gb, 3) != 0x0) {
1120  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1121  return AVERROR_INVALIDDATA;
1122  }
1123  } else {
/* An alpha sub-bitstream has no header of its own; the canvas size must
 * already be known from the lossy primary image. */
1124  if (!s->width || !s->height)
1125  return AVERROR_BUG;
1126  w = s->width;
1127  h = s->height;
1128  }
1129 
1130  /* parse transformations */
1131  s->nb_transforms = 0;
1132  s->reduced_width = s->width;
1133  used = 0;
1134  while (get_bits1(&s->gb)) {
1135  enum TransformType transform = get_bits(&s->gb, 2);
/* Each of the four transform types may appear at most once; 'used' is a
 * bitmask of the types seen so far. */
1136  if (used & (1 << transform)) {
1137  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1138  transform);
/* NOTE(review): line 1139 missing from this listing — presumably sets
 * ret to an error code before the jump; confirm against the original. */
1140  goto free_and_return;
1141  }
1142  used |= (1 << transform);
1143  s->transforms[s->nb_transforms++] = transform;
1144  switch (transform) {
1145  case PREDICTOR_TRANSFORM:
/* NOTE(review): line 1146 missing — presumably
 * "ret = parse_transform_predictor(s);" (see webp.c:462). */
1147  break;
1148  case COLOR_TRANSFORM:
/* NOTE(review): line 1149 missing — presumably
 * "ret = parse_transform_color(s);" (see webp.c:478). */
1150  break;
/* NOTE(review): lines 1151-1152 missing — presumably
 * "case COLOR_INDEXING_TRANSFORM:" and
 * "ret = parse_transform_color_indexing(s);" (see webp.c:494). */
1153  break;
1154  }
1155  if (ret < 0)
1156  goto free_and_return;
1157  }
1158 
1159  /* decode primary image */
1160  s->image[IMAGE_ROLE_ARGB].frame = p;
1161  if (is_alpha_chunk)
1162  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
/* NOTE(review): line 1163 missing — presumably the
 * decode_entropy_coded_image(...) call whose result lands in ret
 * (see webp.c:552). */
1164  if (ret < 0)
1165  goto free_and_return;
1166 
1167  /* apply transformations */
/* Transforms are undone in reverse order of their appearance. */
1168  for (i = s->nb_transforms - 1; i >= 0; i--) {
1169  switch (s->transforms[i]) {
1170  case PREDICTOR_TRANSFORM:
/* NOTE(review): line 1171 missing — presumably
 * "ret = apply_predictor_transform(s);" (see webp.c:923). */
1172  break;
1173  case COLOR_TRANSFORM:
/* NOTE(review): line 1174 missing — presumably
 * "ret = apply_color_transform(s);" (see webp.c:960). */
1175  break;
1176  case SUBTRACT_GREEN:
/* NOTE(review): line 1177 missing — presumably
 * "ret = apply_subtract_green_transform(s);" (see webp.c:984). */
1178  break;
/* NOTE(review): lines 1179-1180 missing — presumably
 * "case COLOR_INDEXING_TRANSFORM:" and its apply call. */
1181  break;
1182  }
1183  if (ret < 0)
1184  goto free_and_return;
1185  }
1186 
1187  *got_frame = 1;
/* NOTE(review): line 1188 missing — presumably
 * "p->pict_type = AV_PICTURE_TYPE_I;". */
1189  p->key_frame = 1;
1190  ret = data_size;
1191 
/* All image-role scratch contexts are freed on both success and error. */
1192 free_and_return:
1193  for (i = 0; i < IMAGE_ROLE_NB; i++)
1194  image_ctx_free(&s->image[i]);
1195 
1196  return ret;
1197 }
1198 
/* Undo the alpha-plane prediction filter in place on frame->data[3].
 * NOTE(review): the signature line (webp.c:1199) was dropped by extraction;
 * per the symbol index it is:
 *   static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
 */
1200 {
1201  int x, y, ls;
1202  uint8_t *dec;
1203 
/* ls = alpha plane stride. */
1204  ls = frame->linesize[3];
1205 
1206  /* filter first row using horizontal filter */
1207  dec = frame->data[3] + 1;
1208  for (x = 1; x < frame->width; x++, dec++)
1209  *dec += *(dec - 1);
1210 
1211  /* filter first column using vertical filter */
1212  dec = frame->data[3] + ls;
1213  for (y = 1; y < frame->height; y++, dec += ls)
1214  *dec += *(dec - ls);
1215 
1216  /* filter the rest using the specified filter */
1217  switch (m) {
/* NOTE(review): line 1218 missing — presumably
 * "case ALPHA_FILTER_HORIZONTAL:" (the loop below adds the left
 * neighbour, which matches the horizontal filter). */
1219  for (y = 1; y < frame->height; y++) {
1220  dec = frame->data[3] + y * ls + 1;
1221  for (x = 1; x < frame->width; x++, dec++)
1222  *dec += *(dec - 1);
1223  }
1224  break;
1225  case ALPHA_FILTER_VERTICAL:
/* Add the top neighbour to each sample. */
1226  for (y = 1; y < frame->height; y++) {
1227  dec = frame->data[3] + y * ls + 1;
1228  for (x = 1; x < frame->width; x++, dec++)
1229  *dec += *(dec - ls);
1230  }
1231  break;
1232  case ALPHA_FILTER_GRADIENT:
/* Gradient predictor: left + top - top-left, clipped to [0,255]. */
1233  for (y = 1; y < frame->height; y++) {
1234  dec = frame->data[3] + y * ls + 1;
1235  for (x = 1; x < frame->width; x++, dec++)
1236  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1237  }
1238  break;
1239  }
1240 }
1241 
/* Decode the ALPH chunk payload into the alpha plane (data[3]) of p.
 * NOTE(review): the first signature line (webp.c:1242) was dropped by
 * extraction; it is:
 *   static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p,
 * and its continuation follows. Returns 0 on success, a negative AVERROR
 * on failure. */
1243  const uint8_t *data_start,
1244  unsigned int data_size)
1245 {
1246  WebPContext *s = avctx->priv_data;
1247  int x, y, ret;
1248 
/* Uncompressed alpha: raw bytes, one row of s->width per canvas line. */
1249  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1250  GetByteContext gb;
1251 
1252  bytestream2_init(&gb, data_start, data_size);
1253  for (y = 0; y < s->height; y++)
1254  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1255  s->width);
1256  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
/* VP8L-compressed alpha: decode as a lossless sub-image, then take its
 * green channel as the alpha values. */
1257  uint8_t *ap, *pp;
1258  int alpha_got_frame = 0;
1259 
1260  s->alpha_frame = av_frame_alloc();
1261  if (!s->alpha_frame)
1262  return AVERROR(ENOMEM);
1263 
1264  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1265  data_start, data_size, 1);
1266  if (ret < 0) {
1267  av_frame_free(&s->alpha_frame);
1268  return ret;
1269  }
1270  if (!alpha_got_frame) {
1271  av_frame_free(&s->alpha_frame);
1272  return AVERROR_INVALIDDATA;
1273  }
1274 
1275  /* copy green component of alpha image to alpha plane of primary image */
/* +2 selects the G byte within each packed ARGB pixel; ap advances by 4
 * (one ARGB pixel) per output alpha byte. */
1276  for (y = 0; y < s->height; y++) {
1277  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1278  pp = p->data[3] + p->linesize[3] * y;
1279  for (x = 0; x < s->width; x++) {
1280  *pp = *ap;
1281  pp++;
1282  ap += 4;
1283  }
1284  }
1285  av_frame_free(&s->alpha_frame);
1286  }
1287 
1288  /* apply alpha filtering */
/* ALPHA_FILTER_NONE is 0, so a nonzero filter id means work to do. */
1289  if (s->alpha_filter)
1290  alpha_inverse_prediction(p, s->alpha_filter);
1291 
1292  return 0;
1293 }
1294 
/* Decode a lossy (VP8) WebP image by delegating to the VP8 decoder.
 * NOTE(review): the opening signature line (webp.c:1295) was dropped by
 * extraction; per the symbol index it is:
 *   static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
 * and its continuation follows. */
1296  int *got_frame, uint8_t *data_start,
1297  unsigned int data_size)
1298 {
1299  WebPContext *s = avctx->priv_data;
1300  int ret;
1301 
/* Lazily initialize the embedded VP8 decoder on first use and flag it as
 * operating inside a WebP container. */
1302  if (!s->initialized) {
1303  ff_vp8_decode_init(avctx);
1304  s->initialized = 1;
1305  s->v.actually_webp = 1;
1306  }
1307  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1308  s->lossless = 0;
1309 
1310  if (data_size > INT_MAX) {
1311  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1312  return AVERROR_PATCHWELCOME;
1313  }
1314 
/* Wrap the chunk payload in the context's scratch packet; the packet does
 * not own the data. */
1315  av_packet_unref(s->pkt);
1316  s->pkt->data = data_start;
1317  s->pkt->size = data_size;
1318 
1319  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1320  if (ret < 0)
1321  return ret;
1322 
1323  if (!*got_frame)
1324  return AVERROR_INVALIDDATA;
1325 
1326  update_canvas_size(avctx, avctx->width, avctx->height);
1327 
/* If an ALPH chunk was seen earlier, decode it into the alpha plane now. */
1328  if (s->has_alpha) {
1329  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1330  s->alpha_data_size);
1331  if (ret < 0)
1332  return ret;
1333  }
1334  return ret;
1335 }
1336 
/* Top-level decode entry point: walk the RIFF/WEBP chunk list and dispatch
 * each chunk to the appropriate handler.
 * NOTE(review): the opening signature line (webp.c:1337) was dropped by
 * extraction; its continuation follows below. */
1338  int *got_frame, AVPacket *avpkt)
1339 {
1340  WebPContext *s = avctx->priv_data;
1341  GetByteContext gb;
1342  int ret;
1343  uint32_t chunk_type, chunk_size;
1344  int vp8x_flags = 0;
1345 
/* Reset per-packet state: canvas size and feature flags are rediscovered
 * from each packet's chunks. */
1346  s->avctx = avctx;
1347  s->width = 0;
1348  s->height = 0;
1349  *got_frame = 0;
1350  s->has_alpha = 0;
1351  s->has_exif = 0;
1352  s->has_iccp = 0;
1353  bytestream2_init(&gb, avpkt->data, avpkt->size);
1354 
/* Minimum container: "RIFF" + size + "WEBP" = 12 bytes. */
1355  if (bytestream2_get_bytes_left(&gb) < 12)
1356  return AVERROR_INVALIDDATA;
1357 
1358  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1359  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1360  return AVERROR_INVALIDDATA;
1361  }
1362 
1363  chunk_size = bytestream2_get_le32(&gb);
1364  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1365  return AVERROR_INVALIDDATA;
1366 
1367  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1368  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1369  return AVERROR_INVALIDDATA;
1370  }
1371 
/* Iterate sub-chunks: 8 bytes = fourcc + size header. */
1372  while (bytestream2_get_bytes_left(&gb) > 8) {
1373  char chunk_str[5] = { 0 };
1374 
1375  chunk_type = bytestream2_get_le32(&gb);
1376  chunk_size = bytestream2_get_le32(&gb);
/* Guard: padding below would overflow UINT32_MAX to 0. */
1377  if (chunk_size == UINT32_MAX)
1378  return AVERROR_INVALIDDATA;
/* RIFF chunks are padded to even size. */
1379  chunk_size += chunk_size & 1;
1380 
1381  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1382  /* we seem to be running out of data, but it could also be that the
1383  bitstream has trailing junk leading to bogus chunk_size. */
1384  break;
1385  }
1386 
1387  switch (chunk_type) {
1388  case MKTAG('V', 'P', '8', ' '):
/* Lossy image data; only the first image chunk is decoded. */
1389  if (!*got_frame) {
1390  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1391  avpkt->data + bytestream2_tell(&gb),
1392  chunk_size);
1393  if (ret < 0)
1394  return ret;
1395  }
1396  bytestream2_skip(&gb, chunk_size);
1397  break;
1398  case MKTAG('V', 'P', '8', 'L'):
/* Lossless image data. */
1399  if (!*got_frame) {
1400  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1401  avpkt->data + bytestream2_tell(&gb),
1402  chunk_size, 0);
1403  if (ret < 0)
1404  return ret;
/* NOTE(review): line 1405 missing from this listing — presumably
 * "avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;"; confirm against
 * the original file. */
1406  }
1407  bytestream2_skip(&gb, chunk_size);
1408  break;
1409  case MKTAG('V', 'P', '8', 'X'):
/* Extended header: feature flags plus canvas size (stored minus one,
 * 24 bits each). Must precede any image data. */
1410  if (s->width || s->height || *got_frame) {
1411  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1412  return AVERROR_INVALIDDATA;
1413  }
1414  vp8x_flags = bytestream2_get_byte(&gb);
1415  bytestream2_skip(&gb, 3);
1416  s->width = bytestream2_get_le24(&gb) + 1;
1417  s->height = bytestream2_get_le24(&gb) + 1;
1418  ret = av_image_check_size(s->width, s->height, 0, avctx);
1419  if (ret < 0)
1420  return ret;
1421  break;
1422  case MKTAG('A', 'L', 'P', 'H'): {
/* Alpha chunk: record a pointer into the packet; the actual decode is
 * deferred until the lossy image has been decoded. */
1423  int alpha_header, filter_m, compression;
1424 
1425  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1426  av_log(avctx, AV_LOG_WARNING,
1427  "ALPHA chunk present, but alpha bit not set in the "
1428  "VP8X header\n");
1429  }
1430  if (chunk_size == 0) {
1431  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  alpha_header = bytestream2_get_byte(&gb);
1435  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1436  s->alpha_data_size = chunk_size - 1;
1437  bytestream2_skip(&gb, s->alpha_data_size);
1438 
1439  filter_m = (alpha_header >> 2) & 0x03;
1440  compression = alpha_header & 0x03;
1441 
1442  if (compression > ALPHA_COMPRESSION_VP8L) {
1443  av_log(avctx, AV_LOG_VERBOSE,
1444  "skipping unsupported ALPHA chunk\n");
1445  } else {
1446  s->has_alpha = 1;
1447  s->alpha_compression = compression;
1448  s->alpha_filter = filter_m;
1449  }
1450 
1451  break;
1452  }
1453  case MKTAG('E', 'X', 'I', 'F'): {
/* Exif metadata: parse a TIFF header + IFD and copy tags into the
 * frame's metadata dictionary. Only the first EXIF chunk is honored. */
1454  int le, ifd_offset, exif_offset = bytestream2_tell(&gb);
1455  AVDictionary *exif_metadata = NULL;
1456  GetByteContext exif_gb;
1457 
1458  if (s->has_exif) {
1459  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1460  goto exif_end;
1461  }
1462  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1463  av_log(avctx, AV_LOG_WARNING,
1464  "EXIF chunk present, but Exif bit not set in the "
1465  "VP8X header\n");
1466 
1467  s->has_exif = 1;
1468  bytestream2_init(&exif_gb, avpkt->data + exif_offset,
1469  avpkt->size - exif_offset);
1470  if (ff_tdecode_header(&exif_gb, &le, &ifd_offset) < 0) {
1471  av_log(avctx, AV_LOG_ERROR, "invalid TIFF header "
1472  "in Exif data\n");
1473  goto exif_end;
1474  }
1475 
1476  bytestream2_seek(&exif_gb, ifd_offset, SEEK_SET);
1477  if (ff_exif_decode_ifd(avctx, &exif_gb, le, 0, &exif_metadata) < 0) {
1478  av_log(avctx, AV_LOG_ERROR, "error decoding Exif data\n");
1479  goto exif_end;
1480  }
1481 
1482  av_dict_copy(&p->metadata, exif_metadata, 0);
1483 
/* Exif parse errors are non-fatal: free and skip the chunk either way. */
1484 exif_end:
1485  av_dict_free(&exif_metadata);
1486  bytestream2_skip(&gb, chunk_size);
1487  break;
1488  }
1489  case MKTAG('I', 'C', 'C', 'P'): {
/* ICC profile: attached to the frame as side data; only the first
 * ICCP chunk is honored. */
1490  AVFrameSideData *sd;
1491 
1492  if (s->has_iccp) {
1493  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1494  bytestream2_skip(&gb, chunk_size);
1495  break;
1496  }
1497  if (!(vp8x_flags & VP8X_FLAG_ICC))
1498  av_log(avctx, AV_LOG_WARNING,
1499  "ICCP chunk present, but ICC Profile bit not set in the "
1500  "VP8X header\n");
1501 
1502  s->has_iccp = 1;
/* NOTE(review): line 1503 missing from this listing — presumably
 * "sd = av_frame_new_side_data(p, AV_FRAME_DATA_ICC_PROFILE,
 * chunk_size);"; confirm against the original file. */
1504  if (!sd)
1505  return AVERROR(ENOMEM);
1506 
1507  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1508  break;
1509  }
1510  case MKTAG('A', 'N', 'I', 'M'):
1511  case MKTAG('A', 'N', 'M', 'F'):
1512  case MKTAG('X', 'M', 'P', ' '):
/* Known but unimplemented features (animation, XMP): warn and skip. */
1513  AV_WL32(chunk_str, chunk_type);
1514  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1515  chunk_str);
1516  bytestream2_skip(&gb, chunk_size);
1517  break;
1518  default:
1519  AV_WL32(chunk_str, chunk_type);
1520  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1521  chunk_str);
1522  bytestream2_skip(&gb, chunk_size);
1523  break;
1524  }
1525  }
1526 
1527  if (!*got_frame) {
1528  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1529  return AVERROR_INVALIDDATA;
1530  }
1531 
1532  return avpkt->size;
1533 }
1534 
/* Decoder init: allocate the scratch AVPacket used to feed the embedded VP8
 * decoder. Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the signature line (webp.c:1535) was dropped by extraction;
 * per the symbol index it is:
 *   static av_cold int webp_decode_init(AVCodecContext *avctx)
 */
1536 {
1537  WebPContext *s = avctx->priv_data;
1538 
1539  s->pkt = av_packet_alloc();
1540  if (!s->pkt)
1541  return AVERROR(ENOMEM);
1542 
1543  return 0;
1544 }
1545 
/* Decoder teardown: free the scratch packet and, if the embedded VP8 decoder
 * was ever initialized, let it free its own state.
 * NOTE(review): the signature line (webp.c:1546) was dropped by extraction;
 * per the symbol index it is:
 *   static av_cold int webp_decode_close(AVCodecContext *avctx)
 */
1547 {
1548  WebPContext *s = avctx->priv_data;
1549 
1550  av_packet_free(&s->pkt);
1551 
1552  if (s->initialized)
1553  return ff_vp8_decode_free(avctx);
1554 
1555  return 0;
1556 }
1557 
/* Codec registration table for the WebP decoder.
 * NOTE(review): lines dropped by extraction — the opening line (webp.c:1558)
 * is "const FFCodec ff_webp_decoder = {" per the symbol index, and lines
 * 1564-1565 are missing (presumably the .init member pointing at
 * webp_decode_init and the FF_CODEC_DECODE_CB(webp_decode_frame) entry);
 * confirm against the original file. */
1559  .p.name = "webp",
1560  .p.long_name = NULL_IF_CONFIG_SMALL("WebP image"),
1561  .p.type = AVMEDIA_TYPE_VIDEO,
1562  .p.id = AV_CODEC_ID_WEBP,
1563  .priv_data_size = sizeof(WebPContext),
1566  .close = webp_decode_close,
1567  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1568  .caps_internal = FF_CODEC_CAP_ICC_PROFILES,
1569 };
WebPContext::width
int width
Definition: webp.c:207
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:196
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2866
HuffReader::vlc
VLC vlc
Definition: webp.c:173
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:865
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:124
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:69
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1295
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:259
INIT_VLC_OUTPUT_LE
#define INIT_VLC_OUTPUT_LE
Definition: vlc.h:98
color
Definition: vf_paletteuse.c:600
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:125
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:107
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:176
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:679
GetByteContext
Definition: bytestream.h:33
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:235
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:104
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:120
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:784
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:770
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
AVFrame::width
int width
Definition: frame.h:397
w
uint8_t w
Definition: llviddspenc.c:38
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:223
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:132
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:116
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:43
b
#define b
Definition: input.c:34
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:115
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:184
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:478
FFCodec
Definition: codec_internal.h:118
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:128
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVDictionary
Definition: dict.c:30
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, int alphabet_size)
Definition: webp.c:254
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:212
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:123
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:127
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, const uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1086
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
HuffReader::simple
int simple
Definition: webp.c:174
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:124
init
static int init
Definition: av_tx.c:47
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:201
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:841
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:821
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:198
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:122
finish
static void finish(void)
Definition: movenc.c:342
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:101
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:211
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1071
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:204
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:890
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:114
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:58
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:777
ff_webp_decoder
const FFCodec ff_webp_decoder
Definition: webp.c:1558
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:954
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:552
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:137
WebPContext::has_exif
int has_exif
Definition: webp.c:205
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:331
WebPContext::has_alpha
int has_alpha
Definition: webp.c:200
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
PredictionMode
PredictionMode
Definition: webp.c:119
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
ImageContext::frame
AVFrame * frame
Definition: webp.c:181
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1854
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:901
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:260
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
TransformType
TransformType
Definition: webp.c:112
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:129
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:65
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:78
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:546
bits
uint8_t bits
Definition: vp3data.h:141
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:68
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:851
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:64
ImageContext
Definition: webp.c:179
get_bits.h
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:402
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:183
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:220
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:190
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:126
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1546
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:185
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:984
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:175
WebPContext::height
int height
Definition: webp.c:208
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:106
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:874
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:141
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:894
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
tiff_common.h
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:182
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:494
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1535
WebPContext::v
VP8Context v
Definition: webp.c:194
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:72
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:66
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:160
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
ff_vp8_decode_init
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2933
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1199
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:167
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:422
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:756
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:169
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:60
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
codec_internal.h
AlphaCompression
AlphaCompression
Definition: webp.c:100
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:113
ImageContext::size_reduction
int size_reduction
Definition: webp.c:189
size
int size
Definition: twinvq_data.h:10344
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
ImageContext::role
enum ImageRole role
Definition: webp.c:180
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:432
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:960
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:59
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:116
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:62
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HuffReader
Definition: webp.c:172
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:462
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:130
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:109
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:216
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:791
WebPContext::lossless
int lossless
Definition: webp.c:209
WebPContext::reduced_width
int reduced_width
Definition: webp.c:215
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:67
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1853
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:197
AlphaFilter
AlphaFilter
Definition: webp.c:105
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:131
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:82
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
WebPContext::gb
GetBitContext gb
Definition: webp.c:195
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:923
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HuffmanIndex
HuffmanIndex
Definition: webp.c:136
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:222
len
int len
Definition: vorbis_enc_data.h:426
exif.h
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:811
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:243
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:881
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:217
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:801
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, const uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1242
AVCodecContext
main external API structure.
Definition: avcodec.h:398
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:139
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:156
VLC
Definition: vlc.h:31
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1337
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:625
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:226
av_clip_uint8
#define av_clip_uint8
Definition: common.h:101
WebPContext::initialized
int initialized
Definition: webp.c:199
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:999
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:90
WebPContext::alpha_data
const uint8_t * alpha_data
Definition: webp.c:203
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:70
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:108
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:426
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:121
WebPContext
Definition: webp.c:193
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
AVPacket
This structure stores compressed data.
Definition: packet.h:351
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2852
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
VP8Context
Definition: vp8.h:162
d
d
Definition: ffmpeg_filter.c:155
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
ImageRole
ImageRole
Definition: webp.c:150
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:228
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:316
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:140
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:206
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:529
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:831
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:102
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:763
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:122
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:202
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:138
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:152
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:133
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:164