vmdvideo.c
/*
 * Sierra VMD video decoder
 * Copyright (c) 2004 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD video decoder
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 * http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 */
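/*
 * Illustrative sketch (not part of the original file) of how a caller could
 * satisfy the expectations above through the public libavcodec API. The
 * buffers vmd_file_header and frame_record_plus_data are hypothetical and
 * stand for the 0x330-byte file header and a packet that begins with the
 * 16-byte frame information record; padding of extradata, width/height setup
 * and error checking are omitted for brevity.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_VMDVIDEO);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     ctx->extradata       = av_memdup(vmd_file_header, 0x330);
 *     ctx->extradata_size  = 0x330;
 *     avcodec_open2(ctx, codec, NULL);
 *     // every AVPacket must start with the 16-byte frame information record
 *     avcodec_send_packet(ctx, frame_record_plus_data);
 *     avcodec_receive_frame(ctx, frame);
 */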

#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "internal.h"
#include "bytestream.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame *prev_frame;

    const unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

    int x_off, y_off;
} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF

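/* lz_unpack() below is an LZSS-style decompressor: a QUEUE_SIZE-byte ring
 * buffer, initialized to spaces (0x20), serves as the dictionary. Each tag
 * byte supplies eight flag bits choosing between a literal byte and an
 * (offset, length) back-reference into the ring buffer; streams that start
 * with the 0x56781234 signature use an extended length encoding. */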
static int lz_unpack(const unsigned char *src, int src_len,
                     unsigned char *dest, int dest_len)
{
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_len);
    d = dest;
    d_end = d + dest_len;
    dataleft = bytestream2_get_le32(&gb);
    memset(queue, 0x20, QUEUE_SIZE);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return AVERROR_INVALIDDATA;
    if (bytestream2_peek_le32(&gb) == 0x56781234) {
        bytestream2_skipu(&gb, 4);
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100;            /* no speclen */
    }

    while (dataleft > 0 && bytestream2_get_bytes_left(&gb) > 0) {
        tag = bytestream2_get_byteu(&gb);
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d_end - d < 8 || bytestream2_get_bytes_left(&gb) < 8)
                return AVERROR_INVALIDDATA;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d_end - d < 1 || bytestream2_get_bytes_left(&gb) < 1)
                        return AVERROR_INVALIDDATA;
                    queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = bytestream2_get_byte(&gb);
                    chainofs |= ((bytestream2_peek_byte(&gb) & 0xF0) << 4);
                    chainlen = (bytestream2_get_byte(&gb) & 0x0F) + 3;
                    if (chainlen == speclen) {
                        chainlen = bytestream2_get_byte(&gb) + 0xF + 3;
                    }
                    if (d_end - d < chainlen)
                        return AVERROR_INVALIDDATA;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
    return d - dest;
}
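/* rle_unpack() expands the word-oriented RLE used by the 0xFF escape of
 * decode method 3: a count byte with the high bit set copies
 * (count & 0x7F) * 2 literal bytes, otherwise the following 16-bit value is
 * repeated count times. The return value is the number of source bytes
 * consumed. */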
static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    unsigned char *pd;
    int i, l, used = 0;
    unsigned char *dest_end = dest + dest_len;
    GetByteContext gb;
    uint16_t run_val;

    bytestream2_init(&gb, src, src_size);
    pd = dest;
    if (src_count & 1) {
        if (bytestream2_get_bytes_left(&gb) < 1)
            return 0;
        *pd++ = bytestream2_get_byteu(&gb);
        used++;
    }

    do {
        if (bytestream2_get_bytes_left(&gb) < 1)
            break;
        l = bytestream2_get_byteu(&gb);
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l)
                return bytestream2_tell(&gb);
            bytestream2_get_bufferu(&gb, pd, l);
            pd += l;
        } else {
            if (dest_end - pd < 2*l || bytestream2_get_bytes_left(&gb) < 2)
                return bytestream2_tell(&gb);
            run_val = bytestream2_get_ne16(&gb);
            for (i = 0; i < l; i++) {
                AV_WN16(pd, run_val);
                pd += 2;
            }
            l *= 2;
        }
        used += l;
    } while (used < src_count);

    return bytestream2_tell(&gb);
}

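/* vmd_decode() renders one frame. s->buf points at the 16-byte frame
 * information record followed by the frame payload: bytes 6-13 hold the
 * little-endian left/top/right/bottom coordinates of the update rectangle,
 * and bit 1 of byte 15 signals that a new 256-entry palette precedes the
 * image data. */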
static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    GetByteContext gb;

    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y, prev_linesize;
    int frame_width, frame_height;

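    /* the update rectangle in the frame record is an inclusive bounding box,
     * hence the "+ 1" when converting right/bottom into width/height */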
    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {

        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid horizontal range %d-%d\n",
               frame_x, frame_width);
        return AVERROR_INVALIDDATA;
    }
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid vertical range %d-%d\n",
               frame_y, frame_height);
        return AVERROR_INVALIDDATA;
    }

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame->data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {

        memcpy(frame->data[0], s->prev_frame->data[0],
            s->avctx->height * frame->linesize[0]);
    }

    /* check if there is a new palette */
    bytestream2_init(&gb, s->buf + 16, s->size - 16);
    if (s->buf[15] & 0x02) {
        bytestream2_skip(&gb, 2);
        palette32 = (unsigned int *)s->palette;
        if (s->size >= PALETTE_COUNT * 3 + 18) {
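            /* each palette component is a 6-bit VGA value: scale by 4, then
             * replicate the top two bits into the low bits so the result
             * spans the full 8-bit range */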
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = bytestream2_get_byteu(&gb) * 4;
                g = bytestream2_get_byteu(&gb) * 4;
                b = bytestream2_get_byteu(&gb) * 4;
                palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
                palette32[i] |= palette32[i] >> 6 & 0x30303;
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Incomplete palette\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (!s->size)
        return 0;

    /* originally UnpackFrame in VAG's code */
    if (bytestream2_get_bytes_left(&gb) < 1)
        return AVERROR_INVALIDDATA;
    meth = bytestream2_get_byteu(&gb);
    if (meth & 0x80) {
        int size;
        if (!s->unpack_buffer_size) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Trying to unpack LZ-compressed frame with no LZ buffer\n");
            return AVERROR_INVALIDDATA;
        }
        size = lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
                         s->unpack_buffer, s->unpack_buffer_size);
        if (size < 0)
            return size;
        meth &= 0x7F;
        bytestream2_init(&gb, s->unpack_buffer, size);
    }

    dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
    if (s->prev_frame->data[0]) {
        prev_linesize = s->prev_frame->linesize[0];
        pp = s->prev_frame->data[0] + frame_y * prev_linesize + frame_x;
    } else {
        pp = NULL;
        prev_linesize = 0;
    }
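    /* method 1 mixes literal runs with byte-for-byte copies from the previous
     * frame, method 2 stores every line raw, and method 3 is method 1 plus a
     * 0xFF escape that hands a run to rle_unpack() */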
    switch (meth) {
    case 1:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (ofs + len > frame_width ||
                        bytestream2_get_bytes_left(&gb) < len)
                        return AVERROR_INVALIDDATA;
                    bytestream2_get_bufferu(&gb, &dp[ofs], len);
                    ofs += len;
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;

    case 2:
        for (i = 0; i < frame_height; i++) {
            bytestream2_get_buffer(&gb, dp, frame_width);
            dp += frame->linesize[0];
        }
        break;

    case 3:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (bytestream2_peek_byte(&gb) == 0xFF) {
                        int slen = len;
                        bytestream2_get_byte(&gb);
                        len = rle_unpack(gb.buffer, &dp[ofs],
                                         len, bytestream2_get_bytes_left(&gb),
                                         frame_width - ofs);
                        ofs += slen;
                        bytestream2_skip(&gb, len);
                    } else {
                        if (ofs + len > frame_width ||
                            bytestream2_get_bytes_left(&gb) < len)
                            return AVERROR_INVALIDDATA;
                        bytestream2_get_buffer(&gb, &dp[ofs], len);
                        ofs += len;
                    }
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;
    }
    return 0;
}

static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->unpack_buffer);
    s->unpack_buffer_size = 0;

    return 0;
}

static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n",
               VMD_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }
    vmd_header = (unsigned char *)avctx->extradata;

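    /* bytes 800-803 of the VMD file header give the buffer size needed to
     * hold an LZ-unpacked frame; a value of 0 means no frame is
     * LZ-compressed */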
    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    if (s->unpack_buffer_size) {
        s->unpack_buffer = av_malloc(s->unpack_buffer_size);
        if (!s->unpack_buffer)
            return AVERROR(ENOMEM);
    }

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
        palette32[i] |= palette32[i] >> 6 & 0x30303;
    }

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame) {
        vmdvideo_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}

static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *got_frame,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;
    AVFrame *frame = data;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if ((ret = vmd_decode(s, frame)) < 0)
        return ret;

    /* make the palette available on the way out */
    memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    av_frame_unref(s->prev_frame);
    if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

AVCodec ff_vmdvideo_decoder = {
    .name           = "vmdvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    .decode         = vmdvideo_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};