Go to the documentation of this file.
27 #define MAX_HUFF_CODES 16
69 int w4 = (avctx->
width + 3) & ~3;
70 int h4 = (avctx->
height + 3) & ~3;
122 for (
i = 0;
i <
w; ++
i)
220 while (x < mp->avctx->width) {
227 for (
i = (x + 3) & ~3;
i < x +
w;
i += 4) {
237 p.
y = av_clip_uintp2(p.
y, 5);
241 p.
v = av_clip_intp2(p.
v, 5);
243 p.
u = av_clip_intp2(p.
u, 5);
269 p.
y = av_clip_uintp2(p.
y, 5);
272 p.
v = av_clip_intp2(p.
v, 5);
274 p.
u = av_clip_intp2(p.
u, 5);
280 for (y0 = 0; y0 < 2; ++y0)
286 void *
data,
int *got_frame,
290 int buf_size = avpkt->
size;
293 int i, count1, count2, sz,
ret;
305 memcpy(mp->
bswapbuf + (buf_size & ~3),
buf + (buf_size & ~3), buf_size & 3);
348 .
name =
"motionpixels",
static av_cold int init(AVCodecContext *avctx)
Filter documentation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Formats: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are not plain lists — they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
uint8_t gradient_scale[3]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_cold int end(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
void * av_mallocz_array(size_t nmemb, size_t size)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
static int mp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static av_cold int mp_decode_init(AVCodecContext *avctx)
static void mp_read_changes_map(MotionPixelsContext *mp, GetBitContext *gb, int count, int bits_len, int read_color)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int mp_yuv_to_rgb(int y, int v, int u, int clip_rgb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static YuvPixel mp_get_yuv_from_rgb(MotionPixelsContext *mp, int x, int y)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
void ff_free_vlc(VLC *vlc)
static av_cold void motionpixels_tableinit(void)
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
@ AV_CODEC_ID_MOTIONPIXELS
static av_always_inline int mp_gradient(MotionPixelsContext *mp, int component, int v)
static unsigned int get_bits1(GetBitContext *s)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
static int mp_get_code(MotionPixelsContext *mp, GetBitContext *gb, int size, int code)
static av_always_inline int mp_get_vlc(MotionPixelsContext *mp, GetBitContext *gb)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf offset
#define i(width, name, range_min, range_max)
and forward it: test the status of outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static YuvPixel mp_rgb_yuv_table[1<< 15]
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define AV_PIX_FMT_RGB555
static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const YuvPixel *p)
HuffCode codes[MAX_HUFF_CODES]
static int mp_read_codes_table(MotionPixelsContext *mp, GetBitContext *gb)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
main external API structure.
static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
VLC_TYPE(* table)[2]
code, bits
AVCodec ff_motionpixels_decoder
static av_cold int mp_decode_end(AVCodecContext *avctx)