36 #define KMVC_KEYFRAME 0x80 37 #define KMVC_PALETTE 0x40 38 #define KMVC_METHOD 0x0F 39 #define MAX_PALSIZE 256 60 #define BLK(data, x, y) data[av_clip((x) + (y) * 320, 0, 320 * 200 -1)] 62 #define kmvc_init_getbits(bb, g) bb.bits = 7; bb.bitbuf = bytestream2_get_byte(g); 64 #define kmvc_getbit(bb, g, res) {\ 66 if (bb.bitbuf & (1 << bb.bits)) res = 1; \ 69 bb.bitbuf = bytestream2_get_byte(g); \ 80 int l0x, l1x, l0y, l1y;
85 for (by = 0; by <
h; by += 8)
86 for (bx = 0; bx <
w; bx += 8) {
93 val = bytestream2_get_byte(&ctx->
g);
94 for (i = 0; i < 64; i++)
95 BLK(ctx->
cur, bx + (i & 0x7), by + (i >> 3)) =
val;
97 for (i = 0; i < 4; i++) {
98 l0x = bx + (i & 1) * 4;
99 l0y = by + (i & 2) * 2;
104 val = bytestream2_get_byte(&ctx->
g);
105 for (j = 0; j < 16; j++)
106 BLK(ctx->
cur, l0x + (j & 3), l0y + (j >> 2)) =
val;
108 val = bytestream2_get_byte(&ctx->
g);
111 if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 320*197 - 4) {
115 for (j = 0; j < 16; j++)
116 BLK(ctx->
cur, l0x + (j & 3), l0y + (j >> 2)) =
117 BLK(ctx->
cur, l0x + (j & 3) - mx, l0y + (j >> 2) - my);
120 for (j = 0; j < 4; j++) {
121 l1x = l0x + (j & 1) * 2;
127 val = bytestream2_get_byte(&ctx->
g);
133 val = bytestream2_get_byte(&ctx->
g);
136 if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 320*199 - 2) {
140 BLK(ctx->
cur, l1x, l1y) =
BLK(ctx->
cur, l1x - mx, l1y - my);
141 BLK(ctx->
cur, l1x + 1, l1y) =
142 BLK(ctx->
cur, l1x + 1 - mx, l1y - my);
143 BLK(ctx->
cur, l1x, l1y + 1) =
144 BLK(ctx->
cur, l1x - mx, l1y + 1 - my);
145 BLK(ctx->
cur, l1x + 1, l1y + 1) =
146 BLK(ctx->
cur, l1x + 1 - mx, l1y + 1 - my);
149 BLK(ctx->
cur, l1x, l1y) = bytestream2_get_byte(&ctx->
g);
150 BLK(ctx->
cur, l1x + 1, l1y) = bytestream2_get_byte(&ctx->
g);
151 BLK(ctx->
cur, l1x, l1y + 1) = bytestream2_get_byte(&ctx->
g);
152 BLK(ctx->
cur, l1x + 1, l1y + 1) = bytestream2_get_byte(&ctx->
g);
169 int l0x, l1x, l0y, l1y;
174 for (by = 0; by <
h; by += 8)
175 for (bx = 0; bx <
w; bx += 8) {
184 val = bytestream2_get_byte(&ctx->
g);
185 for (i = 0; i < 64; i++)
186 BLK(ctx->
cur, bx + (i & 0x7), by + (i >> 3)) =
val;
188 for (i = 0; i < 64; i++)
189 BLK(ctx->
cur, bx + (i & 0x7), by + (i >> 3)) =
190 BLK(ctx->
prev, bx + (i & 0x7), by + (i >> 3));
197 for (i = 0; i < 4; i++) {
198 l0x = bx + (i & 1) * 4;
199 l0y = by + (i & 2) * 2;
204 val = bytestream2_get_byte(&ctx->
g);
205 for (j = 0; j < 16; j++)
206 BLK(ctx->
cur, l0x + (j & 3), l0y + (j >> 2)) =
val;
208 val = bytestream2_get_byte(&ctx->
g);
209 mx = (val & 0xF) - 8;
211 if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 320*197 - 4) {
215 for (j = 0; j < 16; j++)
216 BLK(ctx->
cur, l0x + (j & 3), l0y + (j >> 2)) =
217 BLK(ctx->
prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my);
220 for (j = 0; j < 4; j++) {
221 l1x = l0x + (j & 1) * 2;
227 val = bytestream2_get_byte(&ctx->
g);
233 val = bytestream2_get_byte(&ctx->
g);
234 mx = (val & 0xF) - 8;
236 if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 320*199 - 2) {
240 BLK(ctx->
cur, l1x, l1y) =
BLK(ctx->
prev, l1x + mx, l1y + my);
241 BLK(ctx->
cur, l1x + 1, l1y) =
242 BLK(ctx->
prev, l1x + 1 + mx, l1y + my);
243 BLK(ctx->
cur, l1x, l1y + 1) =
244 BLK(ctx->
prev, l1x + mx, l1y + 1 + my);
245 BLK(ctx->
cur, l1x + 1, l1y + 1) =
246 BLK(ctx->
prev, l1x + 1 + mx, l1y + 1 + my);
249 BLK(ctx->
cur, l1x, l1y) = bytestream2_get_byte(&ctx->
g);
250 BLK(ctx->
cur, l1x + 1, l1y) = bytestream2_get_byte(&ctx->
g);
251 BLK(ctx->
cur, l1x, l1y + 1) = bytestream2_get_byte(&ctx->
g);
252 BLK(ctx->
cur, l1x + 1, l1y + 1) = bytestream2_get_byte(&ctx->
g);
280 header = bytestream2_get_byte(&ctx->
g);
283 if (bytestream2_peek_byte(&ctx->
g) == 127) {
285 for (i = 0; i < 127; i++) {
286 ctx->
pal[i + (header & 0x81)] = 0xFFU << 24 | bytestream2_get_be24(&ctx->
g);
303 for (i = 1; i <= ctx->
palsize; i++) {
304 ctx->
pal[
i] = 0xFF
U << 24 | bytestream2_get_be24(&ctx->
g);
314 memcpy(frame->
data[1], ctx->
pal, 1024);
316 blocksize = bytestream2_get_byte(&ctx->
g);
318 if (blocksize != 8 && blocksize != 127) {
322 memset(ctx->
cur, 0, 320 * 200);
326 memcpy(ctx->
cur, ctx->
prev, 320 * 200);
339 out = frame->
data[0];
341 for (i = 0; i < avctx->
height; i++) {
342 memcpy(out, src, avctx->
width);
376 for (i = 0; i < 256; i++) {
377 c->
pal[
i] = 0xFF
U << 24 | i * 0x10101;
382 "Extradata missing, decoding may not work properly...\n");
395 for (i = 0; i < 256; i++) {
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
This structure describes decoded (raw) audio or video data.
ptrdiff_t const GLvoid * data
int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
Check whether the side-data of src contains a palette of size AVPALETTE_SIZE; if so, copy it to dst and return 1; else return 0.
#define AV_LOG_WARNING
Something somehow does not look correct.
static av_cold int init(AVCodecContext *avctx)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static int kmvc_decode_inter_8x8(KmvcContext *ctx, int w, int h)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
8 bits with AV_PIX_FMT_RGB32 palette
Undefined behavior: in C, some operations are undefined — such as signed integer overflow, dereferencing freed pointers, or accessing memory outside an allocated block. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. This may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing based on wrong assumptions can — and in some cases has — led to effects beyond the output of the computation. The signed-integer-overflow problem in speed-critical code: code that is highly optimized and works with signed integers sometimes has the property that the final output of the computation does not change even if an intermediate value overflows.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static const uint8_t header[24]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int kmvc_decode_intra_8x8(KmvcContext *ctx, int w, int h)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
const char * name
Name of the codec implementation.
#define kmvc_init_getbits(bb, g)
enum AVPictureType pict_type
Picture type of the frame.
int width
picture width / height.
#define kmvc_getbit(bb, g, res)
static av_cold int decode_init(AVCodecContext *avctx)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
These buffered frames must be flushed immediately if a new input produces new output. In that case, the filter must not call request_frame to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are already queued frames, one of these frames should be pushed; otherwise, the filter should request a frame on one of its inputs, repeatedly if necessary, until at least one frame has been pushed. It must return, or at least make progress towards, producing a frame.
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int palette_has_changed
Tell user application that palette has changed from previous frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
common internal api header.
common internal and external API header
int key_frame
1 -> keyframe, 0-> not
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
#define FFSWAP(type, a, b)
Filtering: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video, that means pixel format; for audio, that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link among the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format among a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
uint32_t pal[MAX_PALSIZE]
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
static double val(void *priv, double ch)
This structure stores compressed data.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators...