FFmpeg
gifdec.c
/*
 * GIF decoder
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Baptiste Coudurier
 * Copyright (c) 2012 Vitaliy E Sugrobov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "lzw.h"
#include "gif.h"

/* This value is intentionally set to the "transparent white" color.
 * It is much better to have a white background instead of a black one
 * when a GIF image is converted to a format which does not support
 * transparency.
 */
#define GIF_TRANSPARENT_COLOR    0x00ffffff

typedef struct GifState {
    const AVClass *class;
    AVFrame *frame;
    int screen_width;
    int screen_height;
    int has_global_palette;
    int bits_per_pixel;
    uint32_t bg_color;
    int background_color_index;
    int transparent_color_index;
    int color_resolution;
    /* intermediate buffer for storing color indices
     * obtained from lzw-encoded data stream */
    uint8_t *idx_line;
    int idx_line_size;

    /* after the frame is displayed, the disposal method is used */
    int gce_prev_disposal;
    int gce_disposal;
    /* rectangle describing area that must be disposed */
    int gce_l, gce_t, gce_w, gce_h;
    /* depending on the disposal method we store either the part of the
     * image drawn on the canvas or the background color that
     * should be used upon disposal */
    uint8_t *stored_img;
    int stored_img_size;
    int stored_bg_color;

    GetByteContext gb;
    LZWState *lzw;

    /* aux buffers */
    uint32_t global_palette[256];
    uint32_t local_palette[256];

    AVCodecContext *avctx;
    int keyframe;
    int keyframe_ok;
    int trans_color;    /**< color value that is used instead of transparent color */
} GifState;

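/* Read `nb` packed 3-byte RGB palette entries and expand each one into a
 * 32-bit 0xAARRGGBB word with the alpha channel forced to fully opaque. */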
static void gif_read_palette(GifState *s, uint32_t *pal, int nb)
{
    int i;

    for (i = 0; i < nb; i++, pal++)
        *pal = (0xffu << 24) | bytestream2_get_be24u(&s->gb);
}

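/* Canvas helpers. The output frame uses a 32-bit-per-pixel format, so rows
 * are walked as uint32_t while linesize stays in bytes; rectangle
 * coordinates (l, t, w, h) are given in pixels. */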
static void gif_fill(AVFrame *picture, uint32_t color)
{
    const ptrdiff_t linesize = picture->linesize[0];
    uint8_t *py = picture->data[0];
    const int w = picture->width;
    const int h = picture->height;

    for (int y = 0; y < h; y++) {
        uint32_t *px = (uint32_t *)py;
        for (int x = 0; x < w; x++)
            px[x] = color;
        py += linesize;
    }
}

static void gif_fill_rect(AVFrame *picture, uint32_t color, int l, int t, int w, int h)
{
    const ptrdiff_t linesize = picture->linesize[0];
    uint8_t *py = picture->data[0] + t * linesize;

    for (int y = 0; y < h; y++) {
        uint32_t *px = ((uint32_t *)py) + l;
        for (int x = 0; x < w; x++)
            px[x] = color;
        py += linesize;
    }
}

static void gif_copy_img_rect(const uint8_t *src, uint8_t *dst,
                              ptrdiff_t src_linesize,
                              ptrdiff_t dst_linesize,
                              int l, int t, int w, int h)
{
    const uint8_t *src_py = src;
    uint8_t *dst_py = dst;

    src_py += t * src_linesize;
    dst_py += t * dst_linesize;
    for (int y = 0; y < h; y++) {
        memcpy(dst_py + l * 4, src_py + l * 4, w * 4);
        src_py += src_linesize;
        dst_py += dst_linesize;
    }
}

static int gif_read_image(GifState *s, AVFrame *frame)
{
    int left, top, width, height, bits_per_pixel, code_size, flags, pw;
    int is_interleaved, has_local_palette, y, pass, y1, pal_size, lzwed_len;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    ptrdiff_t linesize;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (bytestream2_get_bytes_left(&s->gb) < 9)
        return AVERROR_INVALIDDATA;

    left   = bytestream2_get_le16u(&s->gb);
    top    = bytestream2_get_le16u(&s->gb);
    width  = bytestream2_get_le16u(&s->gb);
    height = bytestream2_get_le16u(&s->gb);
    flags  = bytestream2_get_byteu(&s->gb);
    is_interleaved    = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel    = (flags & 0x07) + 1;

    ff_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        pal_size = 1 << bits_per_pixel;

        if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }

        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(frame, s->bg_color);
        } else {
            /* otherwise fill with the transparent color.
             * this is necessary since by default the picture is filled with 0x80808080. */
            gif_fill(frame, s->trans_color);
        }
    }

    /* verify that all the image is inside the screen dimensions */
    if (!width || width > s->screen_width) {
        av_log(s->avctx, AV_LOG_WARNING, "Invalid image width: %d, truncating.\n", width);
        width = s->screen_width;
    }
    if (left >= s->screen_width) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid left position: %d.\n", left);
        return AVERROR_INVALIDDATA;
    }
    if (!height || height > s->screen_height) {
        av_log(s->avctx, AV_LOG_WARNING, "Invalid image height: %d, truncating.\n", height);
        height = s->screen_height;
    }
    if (top >= s->screen_height) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid top position: %d.\n", top);
        return AVERROR_INVALIDDATA;
    }
    if (left + width > s->screen_width) {
        /* width must be kept around to avoid lzw vs line desync */
        pw = s->screen_width - left;
        av_log(s->avctx, AV_LOG_WARNING, "Image too wide by %d, truncating.\n",
               left + width - s->screen_width);
    } else {
        pw = width;
    }
    if (top + height > s->screen_height) {
        /* we don't care about the extra invisible lines */
        av_log(s->avctx, AV_LOG_WARNING, "Image too high by %d, truncating.\n",
               top + height - s->screen_height);
        height = s->screen_height - top;
    }

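    /* Before drawing the new image, apply the previous frame's disposal:
     * GCE_DISPOSAL_BACKGROUND repaints the old rectangle with the stored
     * background color, GCE_DISPOSAL_RESTORE copies back the pixels saved in
     * stored_img.  stored_img always uses the absolute value of the frame
     * linesize, hence the FFABS() when copying to and from it. */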
    /* process disposal method */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        gif_copy_img_rect(s->stored_img, frame->data[0],
                          FFABS(frame->linesize[0]), frame->linesize[0],
                          s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = pw;    s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->transparent_color_index >= 0)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            av_fast_malloc(&s->stored_img, &s->stored_img_size,
                           FFABS(frame->linesize[0]) * frame->height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);

            gif_copy_img_rect(frame->data[0], s->stored_img,
                              frame->linesize[0], FFABS(frame->linesize[0]),
                              left, top, pw, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream2_get_byteu(&s->gb);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer,
                                  bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

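    /* Each scanline is LZW-decoded into idx_line as palette indices and then
     * expanded to 32-bit pixels.  Indices equal to the transparent index are
     * skipped so that whatever is already on the canvas shows through. */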
    /* read all the image */
    linesize = frame->linesize[0];
    ptr1 = (uint32_t *)(frame->data[0] + top * linesize) + left;
    ptr  = ptr1;
    pass = 0;
    y1   = 0;
    for (y = 0; y < height; y++) {
        int count = ff_lzw_decode(s->lzw, s->idx_line, width);
        if (count != width) {
            if (count)
                av_log(s->avctx, AV_LOG_ERROR, "LZW decode failed\n");
            goto decode_tail;
        }

        pr = ptr + pw;

        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }

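        /* GIF interlacing stores rows in four passes: every 8th row starting
         * at 0, every 8th starting at 4, every 4th starting at 2 and every
         * 2nd starting at 1 ("4 >> pass" gives the next pass's start row).
         * ptr is a uint32_t pointer while linesize is in bytes, so one row is
         * linesize / 4 elements: advancing by linesize * 2 elements skips
         * 8 rows, linesize * 1 skips 4 and linesize / 2 skips 2. */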
        if (is_interleaved) {
            switch (pass) {
            default:
            case 0:
            case 1:
                y1  += 8;
                ptr += linesize * 2;
                break;
            case 2:
                y1  += 4;
                ptr += linesize * 1;
                break;
            case 3:
                y1  += 2;
                ptr += linesize / 2;
                break;
            }
            while (y1 >= height) {
                y1   = 4 >> pass;
                ptr  = ptr1 + linesize / 4 * y1;
                pass++;
            }
        } else {
            ptr += linesize / 4;
        }
    }

decode_tail:
    /* read the garbage data until the end marker is found */
    lzwed_len = ff_lzw_decode_tail(s->lzw);
    bytestream2_skipu(&s->gb, lzwed_len);

    /* The Graphic Control Extension's scope is a single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}

static int gif_read_extension(GifState *s)
{
    int ext_code, ext_len, gce_flags, gce_transparent_index;

    /* There must be at least 2 bytes:
     * 1 for the extension label and 1 for the extension length. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    ext_code = bytestream2_get_byteu(&s->gb);
    ext_len  = bytestream2_get_byteu(&s->gb);

    ff_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len);

    switch (ext_code) {
    case GIF_GCE_EXT_LABEL:
        if (ext_len != 4)
            goto discard_ext;

        /* We need at least 5 bytes more: 4 for the extension body
         * and 1 for the next block size. */
        if (bytestream2_get_bytes_left(&s->gb) < 5)
            return AVERROR_INVALIDDATA;

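        /* GCE packed fields: bit 0 is the transparent-color flag and
         * bits 2-4 select the disposal method; the next two bytes are the
         * frame delay in hundredths of a second. */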
        gce_flags = bytestream2_get_byteu(&s->gb);
        bytestream2_skipu(&s->gb, 2);    // delay during which the frame is shown
        gce_transparent_index = bytestream2_get_byteu(&s->gb);
        if (gce_flags & 0x01)
            s->transparent_color_index = gce_transparent_index;
        else
            s->transparent_color_index = -1;
        s->gce_disposal = (gce_flags >> 2) & 0x7;

        ff_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n",
                gce_flags,
                s->transparent_color_index, s->gce_disposal);

        if (s->gce_disposal > 3) {
            ff_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n",
                    s->gce_disposal);
            s->gce_disposal = GCE_DISPOSAL_NONE;
        }

        ext_len = bytestream2_get_byteu(&s->gb);
        break;
    }

    /* NOTE: many extension blocks can come after */
discard_ext:
    while (ext_len) {
        /* There must be at least ext_len bytes and 1 for the next block size byte. */
        if (bytestream2_get_bytes_left(&s->gb) < ext_len + 1)
            return AVERROR_INVALIDDATA;

        bytestream2_skipu(&s->gb, ext_len);
        ext_len = bytestream2_get_byteu(&s->gb);

        ff_dlog(s->avctx, "ext_len1=%d\n", ext_len);
    }
    return 0;
}

static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int v, n;
    int background_color_index;

    if (bytestream2_get_bytes_left(&s->gb) < 13)
        return AVERROR_INVALIDDATA;

    /* read gif signature */
    bytestream2_get_bufferu(&s->gb, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) &&
        memcmp(sig, gif89a_sig, 6))
        return AVERROR_INVALIDDATA;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width  = bytestream2_get_le16u(&s->gb);
    s->screen_height = bytestream2_get_le16u(&s->gb);

    v = bytestream2_get_byteu(&s->gb);
    s->color_resolution   = ((v & 0x70) >> 4) + 1;
    s->has_global_palette = (v & 0x80);
    s->bits_per_pixel     = (v & 0x07) + 1;
    background_color_index = bytestream2_get_byteu(&s->gb);
    n = bytestream2_get_byteu(&s->gb);
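    /* A non-zero pixel-aspect-ratio byte encodes the sample aspect ratio
     * as (value + 15) / 64, per the GIF89a specification. */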
    if (n) {
        s->avctx->sample_aspect_ratio.num = n + 15;
        s->avctx->sample_aspect_ratio.den = 64;
    }

    ff_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
            s->screen_width, s->screen_height, s->bits_per_pixel,
            s->has_global_palette);

    if (s->has_global_palette) {
        s->background_color_index = background_color_index;
        n = 1 << s->bits_per_pixel;
        if (bytestream2_get_bytes_left(&s->gb) < n * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->global_palette, n);
        s->bg_color = s->global_palette[s->background_color_index];
    } else
        s->background_color_index = -1;

    return 0;
}

static int gif_parse_next_image(GifState *s, AVFrame *frame)
{
    while (bytestream2_get_bytes_left(&s->gb) > 0) {
        int code = bytestream2_get_byte(&s->gb);
        int ret;

        av_log(s->avctx, AV_LOG_DEBUG, "code=%02x '%c'\n", code, code);

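        /* Dispatch on the block introducer byte: an image descriptor is
         * decoded into the frame and returned, extension blocks only update
         * decoder state, and the trailer ends the stream. */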
        switch (code) {
        case GIF_IMAGE_SEPARATOR:
            return gif_read_image(s, frame);
        case GIF_EXTENSION_INTRODUCER:
            if ((ret = gif_read_extension(s)) < 0)
                return ret;
            break;
        case GIF_TRAILER:
            /* end of image */
            return AVERROR_EOF;
        default:
            /* erroneous block label */
            return AVERROR_INVALIDDATA;
        }
    }
    return AVERROR_EOF;
}

static av_cold int gif_decode_init(AVCodecContext *avctx)
{
    GifState *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_RGB32;
    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);
    ff_lzw_decode_open(&s->lzw);
    if (!s->lzw)
        return AVERROR(ENOMEM);
    return 0;
}

static int gif_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
                            int *got_frame, AVPacket *avpkt)
{
    GifState *s = avctx->priv_data;
    int ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

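    /* A packet that starts with the GIF87a/GIF89a signature is treated as a
     * keyframe: the header is re-parsed and the canvas reinitialized; any
     * other packet can only be decoded after a keyframe has been seen. */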
    if (avpkt->size >= 6) {
        s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
                      memcmp(avpkt->data, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        s->keyframe_ok = 0;
        s->gce_prev_disposal = GCE_DISPOSAL_NONE;
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0)
            return ret;

        av_frame_unref(s->frame);
        av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
        if (!s->idx_line)
            return AVERROR(ENOMEM);
    } else if (!s->keyframe_ok) {
        av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
        return AVERROR_INVALIDDATA;
    }

    ret = ff_reget_buffer(avctx, s->frame, 0);
    if (ret < 0)
        return ret;

    ret = gif_parse_next_image(s, s->frame);
    if (ret < 0)
        return ret;

    if ((ret = av_frame_ref(rframe, s->frame)) < 0)
        return ret;

    rframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    rframe->flags = AV_FRAME_FLAG_KEY * s->keyframe;
    s->keyframe_ok |= !!s->keyframe;

    *got_frame = 1;

    return bytestream2_tell(&s->gb);
}

static av_cold int gif_decode_close(AVCodecContext *avctx)
{
    GifState *s = avctx->priv_data;

    ff_lzw_decode_close(&s->lzw);
    av_frame_free(&s->frame);
    av_freep(&s->idx_line);
    av_freep(&s->stored_img);

    return 0;
}

static const AVOption options[] = {
    { "trans_color", "color value (ARGB) that is used instead of transparent color",
      offsetof(GifState, trans_color), AV_OPT_TYPE_INT,
      {.i64 = GIF_TRANSPARENT_COLOR}, 0, 0xffffffff,
      AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};

static const AVClass decoder_class = {
    .class_name = "gif decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DECODER,
};

const FFCodec ff_gif_decoder = {
    .p.name         = "gif",
    CODEC_LONG_NAME("GIF (Graphics Interchange Format)"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_GIF,
    .priv_data_size = sizeof(GifState),
    .init           = gif_decode_init,
    .close          = gif_decode_close,
    FF_CODEC_DECODE_CB(gif_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .p.priv_class   = &decoder_class,
};