Go to the documentation of this file.
53 #define PREAMBLE_SIZE 4096
66 for (
i = 0;
i < 2;
i++) {
67 for (j = 0; j < 256; j++) {
68 for (k = 0; k < 8; k++) {
69 gdv->
frame[
i * 2048 + j * 8 + k] = j;
80 for (x = 0; x <
w - 7; x+=8) {
82 dst[x + 1] =
src[(x>>1) + 0];
84 dst[x + 3] =
src[(x>>1) + 1];
86 dst[x + 5] =
src[(x>>1) + 2];
88 dst[x + 7] =
src[(x>>1) + 3];
99 for (x =
w - 1; (x+1) & 7; x--) {
102 for (x -= 7; x >= 0; x -= 8) {
104 dst[x + 7] =
src[(x>>1) + 3];
106 dst[x + 5] =
src[(x>>1) + 2];
108 dst[x + 3] =
src[(x>>1) + 1];
110 dst[x + 1] =
src[(x>>1) + 0];
117 for (x = 0; x <
w - 7; x+=8) {
118 dst[x + 0] =
src[2*x + 0];
119 dst[x + 1] =
src[2*x + 2];
120 dst[x + 2] =
src[2*x + 4];
121 dst[x + 3] =
src[2*x + 6];
122 dst[x + 4] =
src[2*x + 8];
123 dst[x + 5] =
src[2*x +10];
124 dst[x + 6] =
src[2*x +12];
125 dst[x + 7] =
src[2*x +14];
141 for (j = 0; j <
h; j++) {
149 for (j = 0; j <
h; j++) {
153 memcpy(dst1,
src1,
w);
157 if (scale_h && scale_v) {
158 for (y = 0; y < (
h>>1); y++) {
163 }
else if (scale_h) {
164 for (y = 0; y < (
h>>1); y++) {
167 memcpy(dst1,
src1,
w);
169 }
else if (scale_v) {
170 for (y = 0; y <
h; y++) {
184 if (
bits->fill == 0) {
185 bits->queue |= bytestream2_get_byte(gb);
188 res =
bits->queue >> 6;
197 bits->queue = bytestream2_get_le32(gb);
203 int res =
bits->queue & ((1 << nbits) - 1);
205 bits->queue >>= nbits;
207 if (
bits->fill <= 16) {
208 bits->queue |= bytestream2_get_le16(gb) <<
bits->fill;
223 c = bytestream2_get_byte(g2);
224 for (
i = 0;
i <
len;
i++) {
225 bytestream2_put_byte(pb,
c);
231 for (
i = 0;
i <
len;
i++) {
232 bytestream2_put_byte(pb, bytestream2_get_byte(g2));
238 for (
i = 0;
i <
len;
i++) {
239 bytestream2_put_byte(pb, bytestream2_get_byte(g2));
256 for (
c = 0;
c < 256;
c++) {
257 for (
i = 0;
i < 16;
i++) {
265 bytestream2_put_byte(pb, bytestream2_get_byte(gb));
266 }
else if (
tag == 1) {
267 int b = bytestream2_get_byte(gb);
268 int len = (
b & 0xF) + 3;
269 int top = (
b >> 4) & 0xF;
270 int off = (bytestream2_get_byte(gb) << 4) + top - 4096;
272 }
else if (
tag == 2) {
273 int len = (bytestream2_get_byte(gb)) + 2;
302 bytestream2_put_byte(pb, bytestream2_get_byte(gb));
303 }
else if (
tag == 1) {
304 int b = bytestream2_get_byte(gb);
305 int len = (
b & 0xF) + 3;
307 int off = (bytestream2_get_byte(gb) << 4) + top - 4096;
309 }
else if (
tag == 2) {
311 int b = bytestream2_get_byte(gb);
318 len = bytestream2_get_le16(gb);
322 int b = bytestream2_get_byte(gb);
323 int len = (
b & 0x3) + 2;
324 int off = -(
b >> 2) - 1;
350 bytestream2_put_byte(pb, bytestream2_get_byte(gb));
360 if (
val != ((1 << lbits) - 1)) {
366 for (
i = 0;
i <
len;
i++) {
367 bytestream2_put_byte(pb, bytestream2_get_byte(gb));
370 }
else if (
tag == 1) {
377 int bb = bytestream2_get_byte(gb);
378 if ((bb & 0x80) == 0) {
381 int top = (bb & 0x7F) << 8;
382 len = top + bytestream2_get_byte(gb) + 146;
386 }
else if (
tag == 2) {
391 int offs = top + bytestream2_get_byte(gb);
392 if ((subtag != 0) || (offs <= 0xF80)) {
393 int len = (subtag) + 3;
402 real_off = ((offs >> 4) & 0x7) + 1;
403 len = ((offs & 0xF) + 2) * 2;
406 for (
i = 0;
i <
len/2;
i++) {
407 bytestream2_put_byte(pb,
c1);
408 bytestream2_put_byte(pb,
c2);
412 int b = bytestream2_get_byte(gb);
413 int off = ((
b & 0x7F)) + 1;
414 int len = ((
b & 0x80) == 0) ? 2 : 3;
422 int q,
b = bytestream2_get_byte(gb);
423 if ((
b & 0xC0) == 0xC0) {
424 len = ((
b & 0x3F)) + 8;
426 off = (q << 8) + (bytestream2_get_byte(gb)) + 1;
429 if ((
b & 0x80) == 0) {
430 len = ((
b >> 4)) + 6;
433 len = ((
b & 0x3F)) + 14;
436 off = (ofs1 << 8) + (bytestream2_get_byte(gb)) - 4096;
439 int ofs1,
b = bytestream2_get_byte(gb);
441 if ((
b >> 4) == 0xF) {
442 len = bytestream2_get_byte(gb) + 21;
447 off = (ofs1 << 8) + bytestream2_get_byte(gb) - 4096;
473 flags = bytestream2_get_le32(gb);
474 compression =
flags & 0xF;
476 if (compression == 4 || compression == 7 || compression > 8)
488 switch (compression) {
492 for (
i = 0;
i < 256;
i++) {
493 unsigned r = bytestream2_get_byte(gb);
494 unsigned g = bytestream2_get_byte(gb);
495 unsigned b = bytestream2_get_byte(gb);
496 gdv->
pal[
i] = 0xFF
U << 24 |
r << 18 |
g << 10 |
b << 2;
526 for (y = 0; y < avctx->
height; y++) {
528 sidx += avctx->
width;
529 didx +=
frame->linesize[0];
535 for (y = 0; y < avctx->
height; y++) {
539 uint8_t *dst2 =
dst + didx;
544 if (!gdv->
scale_h || ((y & 1) == 1)) {
547 didx +=
frame->linesize[0];
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video, that means pixel format. For audio, that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
static int decompress_2(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
static int decompress_68(AVCodecContext *avctx, unsigned skip, unsigned use8)
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
AVCodec p
The public AVCodec.
static double val(void *priv, double ch)
static int gdv_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
static void scaleup(uint8_t *dst, const uint8_t *src, int w)
static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p)
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
#define FF_CODEC_DECODE_CB(func)
static int read_bits2(Bits8 *bits, GetByteContext *gb)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static av_cold int gdv_decode_init(AVCodecContext *avctx)
static void lz_copy(PutByteContext *pb, GetByteContext *g2, int offset, unsigned len)
#define CODEC_LONG_NAME(str)
static av_cold int gdv_decode_close(AVCodecContext *avctx)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static void rescale(GDVContext *gdv, uint8_t *dst, int w, int h, int scale_v, int scale_h)
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
static void fill_bits32(Bits32 *bits, GetByteContext *gb)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
#define i(width, name, range_min, range_max)
static void scaledown(uint8_t *dst, const uint8_t *src, int w)
static av_always_inline void bytestream2_skip_p(PutByteContext *p, unsigned int size)
const FFCodec ff_gdv_decoder
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more: it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; therefore any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
static void scaleup_rev(uint8_t *dst, const uint8_t *src, int w)
main external API structure.
static int decompress_5(AVCodecContext *avctx, unsigned skip)
This structure stores compressed data.
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
Check whether the side-data of src contains a palette of size AVPALETTE_SIZE; if so,...
static int read_bits32(Bits32 *bits, GetByteContext *gb, int nbits)
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.