#define MM_PREAMBLE_SIZE    6

#define MM_TYPE_INTER       0x5
#define MM_TYPE_INTRA       0x8
#define MM_TYPE_INTRA_HH    0xc
#define MM_TYPE_INTER_HH    0xd
#define MM_TYPE_INTRA_HHV   0xe
#define MM_TYPE_INTER_HHV   0xf
#define MM_TYPE_PALETTE     0x31
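The _HH and _HHV suffixes mark chunks stored at half horizontal and half horizontal+vertical resolution. A minimal sketch of how these type codes could be mapped onto the intra/inter handlers declared further down (the dispatch_chunk helper is hypothetical; the actual switch in mm_decode_frame() may differ in detail):

/* Sketch only: map MM_TYPE_* codes to the decoders with half-resolution
 * flags derived from the _HH/_HHV suffixes. */
static int dispatch_chunk(MmContext *s, int type)
{
    switch (type) {
    case MM_TYPE_PALETTE:   mm_decode_pal(s); return 0;
    case MM_TYPE_INTRA:     return mm_decode_intra(s, 0, 0);
    case MM_TYPE_INTRA_HH:  return mm_decode_intra(s, 1, 0);
    case MM_TYPE_INTRA_HHV: return mm_decode_intra(s, 1, 1);
    case MM_TYPE_INTER:     return mm_decode_inter(s, 0, 0);
    case MM_TYPE_INTER_HH:  return mm_decode_inter(s, 1, 0);
    case MM_TYPE_INTER_HHV: return mm_decode_inter(s, 1, 1);
    default:                return AVERROR_INVALIDDATA;
    }
}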
for (i = 0; i < 128; i++) {
    s->palette[i]     = 0xFFU << 24 | bytestream2_get_be24(&s->gb);
    s->palette[i+128] = s->palette[i] << 2;
}
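This loop, from mm_decode_pal(), builds the lower 128 palette entries as opaque ARGB words from big-endian 24-bit VGA triples and derives entries 128-255 by shifting each word left by two. A standalone sketch of that packing (the pack_argb helper is hypothetical, not part of the file; note the shift scales the 6-bit R/G/B components by four while the alpha byte drops to 0xFC):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: pack an opaque ARGB word the same way the loop above
 * does after bytestream2_get_be24() has delivered the 24-bit triple. */
static uint32_t pack_argb(uint8_t r, uint8_t g, uint8_t b)
{
    return 0xFFu << 24 | (uint32_t)r << 16 | (uint32_t)g << 8 | b;
}

int main(void)
{
    uint32_t base   = pack_argb(0x10, 0x20, 0x3F); /* 6-bit VGA components */
    uint32_t bright = base << 2;                   /* the "entry + 128" variant */
    printf("base=%08X bright=%08X\n", (unsigned)base, (unsigned)bright);
    return 0;
}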
/* mm_decode_intra(): run-length decoding of a key frame */
int run_length, color;

if (y >= s->avctx->height)
    return 0;

color = bytestream2_get_byte(&s->gb);
if (color & 0x80) {
    run_length = 1;
} else {
    run_length = (color & 0x7f) + 2;
    color      = bytestream2_get_byte(&s->gb);
}

if (half_horiz)
    run_length *= 2;

if (run_length > s->avctx->width - x)
    return AVERROR_INVALIDDATA;

if (color) {
    memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
    if (half_vert && y + half_vert < s->avctx->height)
        memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
}
x += run_length;

if (x >= s->avctx->width) {
    x = 0;
    y += 1 + half_vert;
}
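The scheme above is a byte-oriented RLE: a byte with the high bit set is a single literal pixel, otherwise its low 7 bits plus 2 give a run length and the next byte supplies the colour; when half_vert is set each run is also written to the following row. A self-contained sketch of the same RLE over a flat buffer (hypothetical rle_decode helper, without the half-resolution handling or the decoder's error reporting):

#include <stdint.h>
#include <string.h>

/* Sketch only: decode the intra run-length scheme into a width*height buffer. */
static void rle_decode(const uint8_t *src, int src_size,
                       uint8_t *dst, int width, int height)
{
    int x = 0, y = 0, i = 0;

    while (i < src_size && y < height) {
        int color = src[i++], run;

        if (color & 0x80) {           /* high bit set: single pixel */
            run = 1;
        } else {                      /* otherwise: run of (byte & 0x7f) + 2 */
            run = (color & 0x7f) + 2;
            if (i >= src_size)
                break;
            color = src[i++];
        }

        if (run > width - x)          /* runs never cross the right edge */
            break;
        memset(dst + y * width + x, color, run);

        x += run;
        if (x >= width) {             /* wrap to the start of the next row */
            x = 0;
            y++;
        }
    }
}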
/* mm_decode_inter(): delta frame; a 16-bit offset splits the chunk into a
 * bitmask stream (s->gb) and a colour stream (data_ptr) */
int data_off = bytestream2_get_le16(&s->gb);
GetByteContext data_ptr;

bytestream2_init(&data_ptr, s->gb.buffer + data_off,
                 bytestream2_get_bytes_left(&s->gb) - data_off);

int length = bytestream2_get_byte(&s->gb);
int x      = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1);
length    &= 0x7f;

for (i = 0; i < length; i++) {
    int replace_array = bytestream2_get_byte(&s->gb);
    for (j = 0; j < 8; j++) {
        int replace = (replace_array >> (7-j)) & 1;
        if (x + half_horiz >= s->avctx->width)
            return AVERROR_INVALIDDATA;
        if (replace) {
            int color = bytestream2_get_byte(&data_ptr);
            s->frame->data[0][y*s->frame->linesize[0] + x] = color;
            if (half_horiz)
                s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color;
            if (half_vert) {
                s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color;
                if (half_horiz)
                    s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color;
            }
        }
        x += 1 + half_horiz;
    }
}
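Each control byte thus carries eight replace flags, MSB first; a set bit pulls a fresh colour from the second stream, a clear bit leaves the pixel from the previous frame untouched. A small sketch of applying one mask byte to a row (hypothetical apply_mask helper; it ignores the half-resolution modes and the bounds checks shown above):

#include <stdint.h>

/* Sketch only: apply one 8-bit replace mask to 8 pixels of a row, pulling
 * new colours from a separate colour stream (mask MSB = leftmost pixel). */
static const uint8_t *apply_mask(uint8_t *row, int x, uint8_t mask,
                                 const uint8_t *colors)
{
    for (int j = 0; j < 8; j++) {
        if ((mask >> (7 - j)) & 1)
            row[x + j] = *colors++;   /* replace this pixel */
        /* pixels with a clear bit keep their previous-frame value */
    }
    return colors;                    /* advanced position in the colour stream */
}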
const uint8_t *buf = avpkt->data;
int buf_size       = avpkt->size;
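In mm_decode_frame() these two values feed the GetByteContext that the routines above read from. A sketch of that setup, assuming the chunk type is the little-endian 16-bit value at the start of the 6-byte preamble (read_preamble is a hypothetical helper; the real function may slice the buffer differently):

/* Hypothetical helper: wrap the packet payload for the bytestream2 readers
 * and extract the chunk type from the preamble. */
static int read_preamble(GetByteContext *gb, const uint8_t *buf, int buf_size,
                         int *type)
{
    if (buf_size < MM_PREAMBLE_SIZE)
        return AVERROR_INVALIDDATA;
    bytestream2_init(gb, buf, buf_size);
    *type = bytestream2_get_le16(gb);            /* chunk type */
    bytestream2_skip(gb, MM_PREAMBLE_SIZE - 2);  /* rest of the preamble */
    return 0;
}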
const FFCodec ff_mmvideo_decoder
const uint8_t * buffer_start
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
static int mm_decode_intra(MmContext *s, int half_horiz, int half_vert)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
AVCodec p
The public AVCodec.
static av_cold int mm_decode_end(AVCodecContext *avctx)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_CODEC_DECODE_CB(func)
int(* init)(AVBSFContext *ctx)
#define CODEC_LONG_NAME(str)
unsigned int palette[AVPALETTE_COUNT]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define MM_TYPE_INTER_HHV
static int mm_decode_inter(MmContext *s, int half_horiz, int half_vert)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define MM_TYPE_INTRA_HHV
static int mm_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
AVCodecContext
main external API structure.
static void mm_decode_pal(MmContext *s)
static av_cold int mm_decode_init(AVCodecContext *avctx)
AVPacket
This structure stores compressed data.
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
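Several declarations above (bytestream2_init, bytestream2_skip, bytestream2_get_bytes_left) belong to the bounds-checked GetByteContext reader used throughout the listing. An illustrative sketch of how they combine, assuming it is compiled inside libavcodec so that "bytestream.h" resolves; not taken from mmvideo.c:

#include "bytestream.h"

/* Illustrative only: drive the bytestream2 reader over a small buffer. */
static void bytestream2_demo(const uint8_t *buf, int buf_size)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, buf_size);         /* bounds-checked reader */

    bytestream2_skip(&gb, 2);                     /* skip a 2-byte header */
    while (bytestream2_get_bytes_left(&gb) >= 3) {
        unsigned rgb = bytestream2_get_be24(&gb); /* 24-bit big-endian value */
        (void)rgb;                                /* use the value here */
    }
    /* checked reads past the end return 0 instead of overrunning the buffer */
}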