libavcodec/mmvideo.c
#define MM_PREAMBLE_SIZE    6

#define MM_TYPE_RAW         0x2
#define MM_TYPE_INTER       0x5
#define MM_TYPE_INTRA       0x8
#define MM_TYPE_INTRA_HH    0xc
#define MM_TYPE_INTER_HH    0xd
#define MM_TYPE_INTRA_HHV   0xe
#define MM_TYPE_INTER_HHV   0xf
#define MM_TYPE_PALETTE     0x31
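Each demuxed chunk carries one of the MM_TYPE_* tags above. A sketch of how mm_decode_frame() plausibly dispatches on it (the type, res and MmContext *s variables are assumed from that function; the _HH/_HHV variants enable half-horizontal and half-vertical pixel duplication):

switch (type) {
case MM_TYPE_RAW:       res = mm_decode_raw(s);         break;
case MM_TYPE_PALETTE:   mm_decode_pal(s); res = 0;      break;
case MM_TYPE_INTRA:     res = mm_decode_intra(s, 0, 0); break;
case MM_TYPE_INTRA_HH:  res = mm_decode_intra(s, 1, 0); break;
case MM_TYPE_INTRA_HHV: res = mm_decode_intra(s, 1, 1); break;
case MM_TYPE_INTER:     res = mm_decode_inter(s, 0, 0); break;
case MM_TYPE_INTER_HH:  res = mm_decode_inter(s, 1, 0); break;
case MM_TYPE_INTER_HHV: res = mm_decode_inter(s, 1, 1); break;
default:                res = AVERROR_INVALIDDATA;      break;
}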
/* mm_decode_raw(): one full-width row of 8-bit palette indices per line */
for (int y = 0; y < s->avctx->height; y++)
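Only the loop header survives in this excerpt. A sketch of the per-row copy it presumably drives, using the bytestream2 helpers cross-referenced below (the width-sized row length is an assumption):

for (int y = 0; y < s->avctx->height; y++) {
    if (bytestream2_get_bytes_left(&s->gb) < s->avctx->width)
        return AVERROR_INVALIDDATA;                       /* truncated packet */
    bytestream2_get_buffer(&s->gb,
                           s->frame->data[0] + y * s->frame->linesize[0],
                           s->avctx->width);              /* copy one row of indices */
}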
/* mm_decode_pal(): update a (start, count) range of the 256-entry palette;
 * each entry is a big-endian 24-bit RGB value, shifted left by 2 and made opaque */
int start = bytestream2_get_le16(&s->gb);
int count = bytestream2_get_le16(&s->gb);
for (int i = 0; i < count; i++)
    s->palette[start+i] = 0xFFU << 24 | (bytestream2_get_be24(&s->gb) << 2);
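The updated palette only becomes visible once it is copied into the output frame; for AV_PIX_FMT_PAL8 the 256 ARGB entries travel in frame->data[1]. A one-line sketch of that step, assuming the standard AVPALETTE_SIZE (256 * 4 bytes) layout:

memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);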
/* mm_decode_intra(): run-length fill of horizontal spans */
int run_length, color;

if (y >= s->avctx->height)                 /* ran past the last row: stop */
    /* ... */

color = bytestream2_get_byte(&s->gb);      /* opcode byte */
/* ... */
run_length = (color & 0x7f) + 2;           /* low 7 bits + 2 = run length */
color = bytestream2_get_byte(&s->gb);      /* palette index to repeat */
/* ... */
if (run_length > s->avctx->width - x)      /* a run must not cross the right edge */
    /* ... */

memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
if (half_vert && y + half_vert < s->avctx->height)
    /* duplicate the run on the next row in half-vertical mode */
    memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
/* ... */
if (x >= s->avctx->width) {                /* end of row: wrap to the next line(s) */
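The branch for opcode bytes with the high bit set is not part of this excerpt; in that case the byte itself appears to be emitted as a single literal pixel. A self-contained sketch of the scheme under that assumption (function name and clamping behaviour are illustrative, not from mmvideo.c):

#include <stdint.h>
#include <string.h>

static void rle_fill_row_sketch(const uint8_t *src, int src_len,
                                uint8_t *row, int width)
{
    int x = 0, i = 0;
    while (i < src_len && x < width) {
        uint8_t op = src[i++];
        if (op & 0x80) {
            row[x++] = op;                  /* high bit set: single literal pixel */
        } else {
            int run = (op & 0x7f) + 2;      /* low 7 bits + 2 = run length */
            uint8_t color;
            if (i >= src_len)
                break;
            color = src[i++];               /* palette index to repeat */
            if (run > width - x)
                run = width - x;            /* clamp; the excerpt above rejects such runs */
            memset(row + x, color, run);
            x += run;
        }
    }
}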
/* mm_decode_inter(): bitmask-driven replacement of pixels over the previous frame */
int data_off = bytestream2_get_le16(&s->gb);     /* offset of the new-pixel color data */
/* ... */
int length = bytestream2_get_byte(&s->gb);
int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1);  /* high bit of length extends x by 256 */
/* ... */
for (i = 0; i < length; i++) {
    int replace_array = bytestream2_get_byte(&s->gb);
    /* ... */
    int replace = (replace_array >> (7-j)) & 1;  /* one mask bit per pixel (MSB first) */
    if (x + half_horiz >= s->avctx->width)
        /* ... */
    int color = bytestream2_get_byte(&data_ptr);
    s->frame->data[0][y*s->frame->linesize[0] + x] = color;
    /* ... duplicated horizontally when half_horiz: */
    s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color;
    /* ... and onto the next row when half_vert: */
    s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color;
    s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color;
/* mm_decode_frame(): entry point for one demuxed packet */
const uint8_t *buf = avpkt->data;
int buf_size       = avpkt->size;
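A sketch of how the packet is then prepared for the per-type decoders (assuming the 6-byte preamble carries the chunk type in its first little-endian 16-bit word, and the usual intreadwrite/bytestream2 helpers):

if (buf_size < MM_PREAMBLE_SIZE)
    return AVERROR_INVALIDDATA;
type      = AV_RL16(&buf[0]);             /* chunk type, one of the MM_TYPE_* tags */
buf      += MM_PREAMBLE_SIZE;
buf_size -= MM_PREAMBLE_SIZE;
bytestream2_init(&s->gb, buf, buf_size);  /* per-type decoders read via s->gb */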
const FFCodec ff_mmvideo_decoder
const uint8_t * buffer_start
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
static int mm_decode_intra(MmContext *s, int half_horiz, int half_vert)
AVCodec p
The public AVCodec.
static av_cold int mm_decode_end(AVCodecContext *avctx)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_CODEC_DECODE_CB(func)
static int mm_decode_raw(MmContext *s)
#define CODEC_LONG_NAME(str)
unsigned int palette[AVPALETTE_COUNT]
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
#define MM_TYPE_INTER_HHV
static int mm_decode_inter(MmContext *s, int half_horiz, int half_vert)
#define i(width, name, range_min, range_max)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define MM_TYPE_INTRA_HHV
static int mm_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
static uint8_t half_vert(BlockXY bxy)
AVCodecContext
main external API structure.
static void mm_decode_pal(MmContext *s)
static av_cold int mm_decode_init(AVCodecContext *avctx)
AVPacket
This structure stores compressed data.
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
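Several of the helpers listed above cooperate in the usual libavcodec decoder lifecycle. A condensed sketch of that pattern (function names are illustrative; the bodies assume the standard FFmpeg headers and the MmContext used above rather than quoting mmvideo.c verbatim):

static av_cold int sketch_init(AVCodecContext *avctx)
{
    MmContext *s   = avctx->priv_data;
    s->avctx       = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;      /* 8-bit indices + 256-entry ARGB palette */
    s->frame       = av_frame_alloc();
    return s->frame ? 0 : AVERROR(ENOMEM);
}

static int sketch_decode(AVCodecContext *avctx, AVFrame *rframe,
                         int *got_frame, AVPacket *avpkt)
{
    MmContext *s = avctx->priv_data;
    int res;

    /* reuse the previous picture so inter frames can patch it in place */
    if ((res = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return res;

    /* ... bytestream2_init() and per-type decoding as sketched above ... */

    if ((res = av_frame_ref(rframe, s->frame)) < 0)    /* hand out a new reference */
        return res;
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int sketch_close(AVCodecContext *avctx)
{
    MmContext *s = avctx->priv_data;
    av_frame_free(&s->frame);              /* drops the frame and its buffers */
    return 0;
}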