FFmpeg
gifdec.c
1 /*
2  * GIF decoder
3  * Copyright (c) 2003 Fabrice Bellard
4  * Copyright (c) 2006 Baptiste Coudurier
5  * Copyright (c) 2012 Vitaliy E Sugrobov
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "libavutil/imgutils.h"
25 #include "libavutil/opt.h"
26 #include "avcodec.h"
27 #include "bytestream.h"
28 #include "internal.h"
29 #include "lzw.h"
30 #include "gif.h"
31 
32 /* This value is intentionally set to the "transparent white" color.
33  * It is much better to have a white background instead of a black one
34  * when a GIF image is converted to a format which does not support transparency.
35  */
36 #define GIF_TRANSPARENT_COLOR 0x00ffffff
37 
38 typedef struct GifState {
39  const AVClass *class;
40  AVFrame *frame;
41  int screen_width;
42  int screen_height;
43  int has_global_palette;
44  int bits_per_pixel;
45  uint32_t bg_color;
46  int background_color_index;
47  int transparent_color_index;
48  int color_resolution;
49  /* intermediate buffer for storing color indices
50  * obtained from lzw-encoded data stream */
51  uint8_t *idx_line;
52  int idx_line_size;
53 
54  /* after the frame is displayed, the disposal method is used */
55  int gce_prev_disposal;
56  int gce_disposal;
57  /* rectangle describing area that must be disposed */
58  int gce_l, gce_t, gce_w, gce_h;
59  /* depending on disposal method we store either part of the image
60  * drawn on the canvas or background color that
61  * should be used upon disposal */
62  uint32_t * stored_img;
63  int stored_img_size;
64  int stored_bg_color;
65 
66  GetByteContext gb;
67  LZWState *lzw;
68 
69  /* aux buffers */
70  uint32_t global_palette[256];
71  uint32_t local_palette[256];
72 
73  AVCodecContext *avctx;
74  int keyframe;
75  int keyframe_ok;
76  int trans_color; /**< color value that is used instead of transparent color */
77 } GifState;
78 
79 static void gif_read_palette(GifState *s, uint32_t *pal, int nb)
80 {
81  int i;
82 
83  for (i = 0; i < nb; i++, pal++)
84  *pal = (0xffu << 24) | bytestream2_get_be24u(&s->gb);
85 }
86 
87 static void gif_fill(AVFrame *picture, uint32_t color)
88 {
89  uint32_t *p = (uint32_t *)picture->data[0];
90  uint32_t *p_end = p + (picture->linesize[0] / sizeof(uint32_t)) * picture->height;
91 
92  for (; p < p_end; p++)
93  *p = color;
94 }
95 
96 static void gif_fill_rect(AVFrame *picture, uint32_t color, int l, int t, int w, int h)
97 {
98  const int linesize = picture->linesize[0] / sizeof(uint32_t);
99  const uint32_t *py = (uint32_t *)picture->data[0] + t * linesize;
100  const uint32_t *pr, *pb = py + h * linesize;
101  uint32_t *px;
102 
103  for (; py < pb; py += linesize) {
104  px = (uint32_t *)py + l;
105  pr = px + w;
106 
107  for (; px < pr; px++)
108  *px = color;
109  }
110 }
111 
112 static void gif_copy_img_rect(const uint32_t *src, uint32_t *dst,
113  int linesize, int l, int t, int w, int h)
114 {
115  const int y_start = t * linesize;
116  const uint32_t *src_px,
117  *src_py = src + y_start,
118  *dst_py = dst + y_start;
119  const uint32_t *src_pb = src_py + h * linesize;
120  uint32_t *dst_px;
121 
122  for (; src_py < src_pb; src_py += linesize, dst_py += linesize) {
123  src_px = src_py + l;
124  dst_px = (uint32_t *)dst_py + l;
125 
126  memcpy(dst_px, src_px, w * sizeof(uint32_t));
127  }
128 }
129 
130 static int gif_read_image(GifState *s, AVFrame *frame)
131 {
132  int left, top, width, height, bits_per_pixel, code_size, flags, pw;
133  int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size, lzwed_len;
134  uint32_t *ptr, *pal, *px, *pr, *ptr1;
135  int ret;
136  uint8_t *idx;
137 
138  /* At least 9 bytes of Image Descriptor. */
139  if (bytestream2_get_bytes_left(&s->gb) < 9)
140  return AVERROR_INVALIDDATA;
141 
142  left = bytestream2_get_le16u(&s->gb);
143  top = bytestream2_get_le16u(&s->gb);
144  width = bytestream2_get_le16u(&s->gb);
145  height = bytestream2_get_le16u(&s->gb);
146  flags = bytestream2_get_byteu(&s->gb);
147  is_interleaved = flags & 0x40;
148  has_local_palette = flags & 0x80;
149  bits_per_pixel = (flags & 0x07) + 1;
150 
151  ff_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);
152 
153  if (has_local_palette) {
154  pal_size = 1 << bits_per_pixel;
155 
156  if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
157  return AVERROR_INVALIDDATA;
158 
159  gif_read_palette(s, s->local_palette, pal_size);
160  pal = s->local_palette;
161  } else {
162  if (!s->has_global_palette) {
163  av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
164  return AVERROR_INVALIDDATA;
165  }
166 
167  pal = s->global_palette;
168  }
169 
170  if (s->keyframe) {
171  if (s->transparent_color_index == -1 && s->has_global_palette) {
172  /* transparency wasn't set before the first frame, fill with background color */
173  gif_fill(frame, s->bg_color);
174  } else {
175  /* otherwise fill with transparent color.
176  * this is necessary since by default the picture is filled with 0x80808080. */
177  gif_fill(frame, s->trans_color);
178  }
179  }
180 
181  /* verify that the whole image is inside the screen dimensions */
182  if (!width || width > s->screen_width) {
183  av_log(s->avctx, AV_LOG_WARNING, "Invalid image width: %d, truncating.\n", width);
184  width = s->screen_width;
185  }
186  if (left >= s->screen_width) {
187  av_log(s->avctx, AV_LOG_ERROR, "Invalid left position: %d.\n", left);
188  return AVERROR_INVALIDDATA;
189  }
190  if (!height || height > s->screen_height) {
191  av_log(s->avctx, AV_LOG_WARNING, "Invalid image height: %d, truncating.\n", height);
192  height = s->screen_height;
193  }
194  if (top >= s->screen_height) {
195  av_log(s->avctx, AV_LOG_ERROR, "Invalid top position: %d.\n", top);
196  return AVERROR_INVALIDDATA;
197  }
198  if (left + width > s->screen_width) {
199  /* width must be kept around to avoid lzw vs line desync */
200  pw = s->screen_width - left;
201  av_log(s->avctx, AV_LOG_WARNING, "Image too wide by %d, truncating.\n",
202  left + width - s->screen_width);
203  } else {
204  pw = width;
205  }
206  if (top + height > s->screen_height) {
207  /* we don't care about the extra invisible lines */
208  av_log(s->avctx, AV_LOG_WARNING, "Image too high by %d, truncating.\n",
209  top + height - s->screen_height);
210  height = s->screen_height - top;
211  }
212 
213  /* process disposal method */
214  if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
215  gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
216  } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
217  gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
218  frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
219  }
220 
221  s->gce_prev_disposal = s->gce_disposal;
222 
223  if (s->gce_disposal != GCE_DISPOSAL_NONE) {
224  s->gce_l = left; s->gce_t = top;
225  s->gce_w = pw; s->gce_h = height;
226 
227  if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
228  if (s->transparent_color_index >= 0)
229  s->stored_bg_color = s->trans_color;
230  else
231  s->stored_bg_color = s->bg_color;
232  } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
233  av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
234  if (!s->stored_img)
235  return AVERROR(ENOMEM);
236 
237  gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
238  frame->linesize[0] / sizeof(uint32_t), left, top, pw, height);
239  }
240  }
241 
242  /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
243  if (bytestream2_get_bytes_left(&s->gb) < 2)
244  return AVERROR_INVALIDDATA;
245 
246  /* now get the image data */
247  code_size = bytestream2_get_byteu(&s->gb);
248  if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer,
249  bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) {
250  av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
251  return ret;
252  }
253 
254  /* read all the image */
255  linesize = frame->linesize[0] / sizeof(uint32_t);
256  ptr1 = (uint32_t *)frame->data[0] + top * linesize + left;
257  ptr = ptr1;
258  pass = 0;
259  y1 = 0;
260  for (y = 0; y < height; y++) {
261  int count = ff_lzw_decode(s->lzw, s->idx_line, width);
262  if (count != width) {
263  if (count)
264  av_log(s->avctx, AV_LOG_ERROR, "LZW decode failed\n");
265  goto decode_tail;
266  }
267 
268  pr = ptr + pw;
269 
270  for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
271  if (*idx != s->transparent_color_index)
272  *px = pal[*idx];
273  }
274 
275  if (is_interleaved) {
276  switch(pass) {
277  default:
278  case 0:
279  case 1:
280  y1 += 8;
281  ptr += linesize * 8;
282  break;
283  case 2:
284  y1 += 4;
285  ptr += linesize * 4;
286  break;
287  case 3:
288  y1 += 2;
289  ptr += linesize * 2;
290  break;
291  }
292  while (y1 >= height) {
293  y1 = 4 >> pass;
294  ptr = ptr1 + linesize * y1;
295  pass++;
296  }
297  } else {
298  ptr += linesize;
299  }
300  }
301 
302  decode_tail:
303  /* read the garbage data until end marker is found */
304  lzwed_len = ff_lzw_decode_tail(s->lzw);
305  bytestream2_skipu(&s->gb, lzwed_len);
306 
307  /* The Graphic Control Extension's scope is a single frame.
308  * Remove its influence. */
309  s->transparent_color_index = -1;
310  s->gce_disposal = GCE_DISPOSAL_NONE;
311 
312  return 0;
313 }
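/* Editorial sketch, not part of gifdec.c: a standalone program that reproduces
 * the row order implemented by the interlacing bookkeeping above. Pass 1
 * covers rows 0, 8, 16, ... (step 8), pass 2 starts at row 4 (step 8),
 * pass 3 at row 2 (step 4) and pass 4 at row 1 (step 2), which is exactly
 * what the pass/y1 logic in gif_read_image() walks through. */
#include <stdio.h>

int main(void)
{
    const int height   = 16;              /* example image height */
    const int start[4] = { 0, 4, 2, 1 };
    const int step[4]  = { 8, 8, 4, 2 };

    for (int pass = 0; pass < 4; pass++)
        for (int y = start[pass]; y < height; y += step[pass])
            printf("pass %d -> row %d\n", pass + 1, y);
    return 0;
}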
314 
315 static int gif_read_extension(GifState *s)
316 {
317  int ext_code, ext_len, gce_flags, gce_transparent_index;
318 
319  /* There must be at least 2 bytes:
320  * 1 for extension label and 1 for extension length. */
321  if (bytestream2_get_bytes_left(&s->gb) < 2)
322  return AVERROR_INVALIDDATA;
323 
324  ext_code = bytestream2_get_byteu(&s->gb);
325  ext_len = bytestream2_get_byteu(&s->gb);
326 
327  ff_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len);
328 
329  switch(ext_code) {
330  case GIF_GCE_EXT_LABEL:
331  if (ext_len != 4)
332  goto discard_ext;
333 
334  /* We need at least 5 more bytes: 4 for the extension body
335  * and 1 for the next block size. */
336  if (bytestream2_get_bytes_left(&s->gb) < 5)
337  return AVERROR_INVALIDDATA;
338 
339  gce_flags = bytestream2_get_byteu(&s->gb);
340  bytestream2_skipu(&s->gb, 2); // delay during which the frame is shown
341  gce_transparent_index = bytestream2_get_byteu(&s->gb);
342  if (gce_flags & 0x01)
343  s->transparent_color_index = gce_transparent_index;
344  else
345  s->transparent_color_index = -1;
346  s->gce_disposal = (gce_flags >> 2) & 0x7;
347 
348  ff_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n",
349  gce_flags,
350  s->transparent_color_index, s->gce_disposal);
351 
352  if (s->gce_disposal > 3) {
353  s->gce_disposal = GCE_DISPOSAL_NONE;
354  ff_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n", ext_len);
355  }
356 
357  ext_len = bytestream2_get_byteu(&s->gb);
358  break;
359  }
360 
361  /* NOTE: many extension blocks can come after */
362  discard_ext:
363  while (ext_len) {
364  /* There must be at least ext_len bytes plus 1 more for the next block size byte. */
365  if (bytestream2_get_bytes_left(&s->gb) < ext_len + 1)
366  return AVERROR_INVALIDDATA;
367 
368  bytestream2_skipu(&s->gb, ext_len);
369  ext_len = bytestream2_get_byteu(&s->gb);
370 
371  ff_dlog(s->avctx, "ext_len1=%d\n", ext_len);
372  }
373  return 0;
374 }
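/* Illustrative only, not part of gifdec.c: a hand-assembled GIF89a Graphic
 * Control Extension laid out as gif_read_extension() expects to find it in
 * the bytestream, after gif_parse_next_image() has already consumed the 0x21
 * extension introducer. */
static const unsigned char example_gce[] = {
    0xf9,       /* ext_code: GIF_GCE_EXT_LABEL                            */
    0x04,       /* ext_len: the GCE body is always 4 bytes                */
    0x09,       /* gce_flags: disposal method 2 (background), transparency on */
    0x0a, 0x00, /* frame delay in 1/100 s, little endian (skipped above)  */
    0x1f,       /* gce_transparent_index                                  */
    0x00        /* size of the next sub-block: 0 terminates the extension */
};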
375 
376 static int gif_read_header1(GifState *s)
377 {
378  uint8_t sig[6];
379  int v, n;
380  int background_color_index;
381 
382  if (bytestream2_get_bytes_left(&s->gb) < 13)
383  return AVERROR_INVALIDDATA;
384 
385  /* read gif signature */
386  bytestream2_get_bufferu(&s->gb, sig, 6);
387  if (memcmp(sig, gif87a_sig, 6) &&
388  memcmp(sig, gif89a_sig, 6))
389  return AVERROR_INVALIDDATA;
390 
391  /* read screen header */
392  s->transparent_color_index = -1;
393  s->screen_width = bytestream2_get_le16u(&s->gb);
394  s->screen_height = bytestream2_get_le16u(&s->gb);
395 
396  v = bytestream2_get_byteu(&s->gb);
397  s->color_resolution = ((v & 0x70) >> 4) + 1;
398  s->has_global_palette = (v & 0x80);
399  s->bits_per_pixel = (v & 0x07) + 1;
400  background_color_index = bytestream2_get_byteu(&s->gb);
401  n = bytestream2_get_byteu(&s->gb);
402  if (n) {
403  s->avctx->sample_aspect_ratio.num = n + 15;
404  s->avctx->sample_aspect_ratio.den = 64;
405  }
406 
407  ff_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
408  s->screen_width, s->screen_height, s->bits_per_pixel,
409  s->has_global_palette);
410 
411  if (s->has_global_palette) {
412  s->background_color_index = background_color_index;
413  n = 1 << s->bits_per_pixel;
414  if (bytestream2_get_bytes_left(&s->gb) < n * 3)
415  return AVERROR_INVALIDDATA;
416 
417  gif_read_palette(s, s->global_palette, n);
418  s->bg_color = s->global_palette[s->background_color_index];
419  } else
420  s->background_color_index = -1;
421 
422  return 0;
423 }
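/* Worked example for the aspect-ratio mapping above (editorial note, not part
 * of gifdec.c): the GIF89a "pixel aspect ratio" byte n encodes (n + 15) / 64,
 * so n = 49 gives 64/64 = 1:1 square pixels and n = 1 gives 16/64 = 1:4;
 * n = 0 means "no aspect ratio information", which is why
 * sample_aspect_ratio is only set when n is non-zero. */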
424 
425 static int gif_parse_next_image(GifState *s, AVFrame *frame)
426 {
427  while (bytestream2_get_bytes_left(&s->gb) > 0) {
428  int code = bytestream2_get_byte(&s->gb);
429  int ret;
430 
431  av_log(s->avctx, AV_LOG_DEBUG, "code=%02x '%c'\n", code, code);
432 
433  switch (code) {
434  case GIF_IMAGE_SEPARATOR:
435  return gif_read_image(s, frame);
436  case GIF_EXTENSION_INTRODUCER:
437  if ((ret = gif_read_extension(s)) < 0)
438  return ret;
439  break;
440  case GIF_TRAILER:
441  /* end of image */
442  return AVERROR_EOF;
443  default:
444  /* erroneous block label */
445  return AVERROR_INVALIDDATA;
446  }
447  }
448  return AVERROR_EOF;
449 }
450 
451 static av_cold int gif_decode_init(AVCodecContext *avctx)
452 {
453  GifState *s = avctx->priv_data;
454 
455  s->avctx = avctx;
456 
457  avctx->pix_fmt = AV_PIX_FMT_RGB32;
458  s->frame = av_frame_alloc();
459  if (!s->frame)
460  return AVERROR(ENOMEM);
461  ff_lzw_decode_open(&s->lzw);
462  if (!s->lzw)
463  return AVERROR(ENOMEM);
464  return 0;
465 }
466 
467 static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
468 {
469  GifState *s = avctx->priv_data;
470  int ret;
471 
472  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
473 
474  s->frame->pts = avpkt->pts;
475 #if FF_API_PKT_PTS
476 FF_DISABLE_DEPRECATION_WARNINGS
477  s->frame->pkt_pts = avpkt->pts;
478 FF_ENABLE_DEPRECATION_WARNINGS
479 #endif
480  s->frame->pkt_dts = avpkt->dts;
481  s->frame->pkt_duration = avpkt->duration;
482 
483  if (avpkt->size >= 6) {
484  s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
485  memcmp(avpkt->data, gif89a_sig, 6) == 0;
486  } else {
487  s->keyframe = 0;
488  }
489 
490  if (s->keyframe) {
491  s->keyframe_ok = 0;
492  s->gce_prev_disposal = GCE_DISPOSAL_NONE;
493  if ((ret = gif_read_header1(s)) < 0)
494  return ret;
495 
496  if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0)
497  return ret;
498 
499  av_frame_unref(s->frame);
500  if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
501  return ret;
502 
503  av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
504  if (!s->idx_line)
505  return AVERROR(ENOMEM);
506 
507  s->frame->pict_type = AV_PICTURE_TYPE_I;
508  s->frame->key_frame = 1;
509  s->keyframe_ok = 1;
510  } else {
511  if (!s->keyframe_ok) {
512  av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
513  return AVERROR_INVALIDDATA;
514  }
515 
516  if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
517  return ret;
518 
519  s->frame->pict_type = AV_PICTURE_TYPE_P;
520  s->frame->key_frame = 0;
521  }
522 
523  ret = gif_parse_next_image(s, s->frame);
524  if (ret < 0)
525  return ret;
526 
527  if ((ret = av_frame_ref(data, s->frame)) < 0)
528  return ret;
529  *got_frame = 1;
530 
531  return bytestream2_tell(&s->gb);
532 }
533 
534 static av_cold int gif_decode_close(AVCodecContext *avctx)
535 {
536  GifState *s = avctx->priv_data;
537 
538  ff_lzw_decode_close(&s->lzw);
539  av_frame_free(&s->frame);
540  av_freep(&s->idx_line);
541  av_freep(&s->stored_img);
542 
543  return 0;
544 }
545 
546 static const AVOption options[] = {
547  { "trans_color", "color value (ARGB) that is used instead of transparent color",
548  offsetof(GifState, trans_color), AV_OPT_TYPE_INT,
549  {.i64 = GIF_TRANSPARENT_COLOR}, 0, 0xffffffff,
550  AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM },
551  { NULL },
552 };
553 
554 static const AVClass decoder_class = {
555  .class_name = "gif decoder",
556  .item_name = av_default_item_name,
557  .option = options,
558  .version = LIBAVUTIL_VERSION_INT,
559  .category = AV_CLASS_CATEGORY_DECODER,
560 };
561 
562 AVCodec ff_gif_decoder = {
563  .name = "gif",
564  .long_name = NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"),
565  .type = AVMEDIA_TYPE_VIDEO,
566  .id = AV_CODEC_ID_GIF,
567  .priv_data_size = sizeof(GifState),
568  .init = gif_decode_init,
569  .close = gif_decode_close,
570  .decode = gif_decode_frame,
571  .capabilities = AV_CODEC_CAP_DR1,
572  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
573  FF_CODEC_CAP_INIT_CLEANUP,
574  .priv_class = &decoder_class,
575 };
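
The decoder is not called directly; it is registered as ff_gif_decoder and reached through the generic libavcodec API. The short sketch below is an editorial addition, not part of gifdec.c: decode_gif_packet() is a hypothetical helper, error handling is trimmed, and the AVPacket is assumed to come from a demuxer (such as FFmpeg's GIF demuxer). It shows how a packet would typically be routed into gif_decode_frame() via the send/receive API.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_gif_packet(const AVPacket *pkt, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_GIF);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);
    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    /* gif_decode_frame() runs inside avcodec_send_packet()/receive_frame() */
    if ((ret = avcodec_send_packet(ctx, pkt)) < 0)
        goto end;
    ret = avcodec_receive_frame(ctx, out); /* out is an RGB32 frame on success */

end:
    avcodec_free_context(&ctx);
    return ret;
}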