"Insufficient extradata - need at least %d bytes, got %d\n",
mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
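The expression above simply rounds the picture dimensions up to whole 16x16 macroblocks before multiplying. A worked example, assuming a 176x144 (QCIF) picture:

    mb_size = ((176 + 15) >> 4) * ((144 + 15) >> 4);  /* = 11 * 9 = 99 macroblocks */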
for(j = 0; j < 4; j += 2){
for(k = 0; k < 2; k++){
return rv30_p_types[code];
return rv30_b_types[code];
const int stride, const int lim)
for(i = 0; i < 4; i++){
diff = ((src[-2*step] - src[1*step]) - (src[-1*step] - src[0*step])*4) >> 3;
diff = av_clip(diff, -lim, lim);
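Together with the rv30_weak_loop_filter() prototype listed further down, the two lines above show the core of the weak deblocking step: a correction is derived from the four pixels straddling the block edge, clipped to a limit taken from the rv30_loop_filt_lim table, and applied to the two pixels next to the edge. A minimal sketch of that idea; the two store lines and the stride advance are assumptions, not quotes from the decoder:

    #include "libavutil/common.h"   /* av_clip(), av_clip_uint8() */

    /* Sketch only: the loop, diff and clip lines mirror the excerpts above;
     * how the clipped correction is applied is an assumption. */
    static void weak_filter_sketch(uint8_t *src, const int step,
                                   const int stride, const int lim)
    {
        for (int i = 0; i < 4; i++) {
            int diff = ((src[-2*step] - src[1*step]) - (src[-1*step] - src[0*step])*4) >> 3;
            diff = av_clip(diff, -lim, lim);
            src[-1*step] = av_clip_uint8(src[-1*step] + diff);  /* assumed */
            src[ 0*step] = av_clip_uint8(src[ 0*step] - diff);  /* assumed */
            src += stride;  /* advance along the edge */
        }
    }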
int loc_lim, cur_lim, left_lim = 0, top_lim = 0;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
for(j = 0; j < 16; j += 4){
for(i = !mb_x; i < 4; i++, Y += 4){
else if(!i && r->deblock_coefs[mb_pos - 1] & (1 << (ij + 3)))
for(k = 0; k < 2; k++){
int cur_cbp, left_cbp = 0;
cur_cbp = (r->cbp_chroma[mb_pos] >> (k*4)) & 0xF;
left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
for(j = 0; j < 8; j += 4){
for(i = !mb_x; i < 2; i++, C += 4){
int ij = i + (j >> 1);
if (cur_cbp & (1 << ij))
else if(!i && left_cbp & (1 << (ij + 1)))
else if( i && cur_cbp & (1 << (ij - 1)))
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
for(j = 4*!row; j < 16; j += 4){
for(i = 0; i < 4; i++, Y += 4){
for(k = 0; k < 2; k++){
int cur_cbp, top_cbp = 0;
cur_cbp = (r->cbp_chroma[mb_pos] >> (k*4)) & 0xF;
for(j = 4*!row; j < 8; j += 4){
for(i = 0; i < 2; i++, C += 4){
int ij = i + (j >> 1);
else if(!j && top_cbp & (1 << (ij + 2)))
else if( j && cur_cbp & (1 << (ij - 2)))
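All of the bit tests in the excerpts above use the same indexing scheme: each 4x4 luma subblock maps to one bit of the 16-bit deblock_coefs entry, row by row (bit x + 4*y), and each 2x2 group of chroma subblocks maps to one bit of a cbp_chroma nibble (bit x + 2*y). The neighbour tests then become plain shifts: bit (ij + 3) of the left macroblock is the rightmost subblock in the same luma row, bit (ij + 2) of the top macroblock is the bottom chroma row, and so on. An illustrative helper (not part of the decoder) for the luma case:

    /* Illustrative only: bit x + 4*y of the 16-bit mask corresponds to the
     * 4x4 luma subblock in column x, row y of the macroblock. */
    static inline int luma_subblock_bit(int x, int y)
    {
        return 1 << (x + y * 4);
    }

    /* cur  & luma_subblock_bit(x, y)  -> this subblock has coded coefficients
     * left & luma_subblock_bit(3, y)  -> bit (ij + 3) when x == 0: the rightmost
     *                                    subblock of the macroblock to the left */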
static const uint8_t rv30_luma_dc_quant[32]
DC quantizer mapping for RV30.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void flush(AVCodecContext *avctx)
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
#define IS_SEPARATE_DC(a)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
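A minimal usage sketch for the bitreader API listed here; the buffer layout and field widths are invented for illustration:

    #include "libavcodec/get_bits.h"

    static int read_fields_example(const uint8_t *buf, int size)
    {
        GetBitContext gb;
        if (init_get_bits8(&gb, buf, size) < 0)   /* wrap the byte buffer for bit reading */
            return -1;
        int flag  = get_bits1(&gb);               /* one-bit flag */
        int quant = get_bits(&gb, 5);             /* 5-bit field, within the 1-25 bit range */
        return flag ? quant : 0;
    }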
#define AV_LOG_WARNING
Something somehow does not look correct.
B-frame macroblock, forward prediction.
static av_cold int init(AVCodecContext *avctx)
static const uint8_t rv30_loop_filt_lim[32]
Loop filter limits are taken from this table.
Bidirectionally predicted B-frame macroblock, no motion vectors.
static av_cold int rv30_decode_init(AVCodecContext *avctx)
Initialize decoder.
RV30 and RV40 decoder common data declarations.
const uint8_t * luma_dc_quant_p
luma subblock DC quantizer for interframes
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
Decode 4x4 intra types array.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
uint16_t * deblock_coefs
deblock coefficients for each macroblock
static int rv30_decode_mb_info(RV34DecContext *r)
Decode macroblock information.
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
int quant
quantizer used for this slice
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const uint8_t rv30_itype_from_context[900]
This table is used for retrieving the current intra type based on its neighbors and an adjustment provided by the decoded intra type difference (see rv30_itype_code).
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const char * name
Name of the codec implementation.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
void ff_mpeg_flush(AVCodecContext *avctx)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
int(* parse_slice_header)(struct RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
Intra macroblock with DCs in a separate 4x4 block.
Picture * current_picture_ptr
pointer to the current picture
static const uint8_t rv30_itype_code[9 * 9 * 2]
This table is used for storing the differences between the predicted and the real intra type.
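The table sizes listed here suggest how the intra type prediction fits together: 900 = 10 x 10 x 9 entries is consistent with an index built from the top neighbour type, the left neighbour type (each offset by one so that "unavailable" becomes 0) and one of nine differences taken from rv30_itype_code. A hypothetical sketch of such a lookup; the index formula and all names are assumptions, not quotes from the decoder:

    /* Hypothetical: 900 entries = 10 (top+1) * 10 (left+1) * 9 (difference),
     * with -1 meaning "no neighbour available". */
    static int8_t predict_itype_sketch(const uint8_t ctx_table[900],
                                       const uint8_t diff_table[9 * 9 * 2],
                                       int top, int left, int code, int k)
    {
        int A = top  + 1;
        int B = left + 1;
        int d = diff_table[code * 2 + k];        /* two differences per decoded code */
        return ctx_table[(A * 10 + B) * 9 + d];  /* == A*90 + B*9 + d */
    }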
int(* decode_mb_info)(struct RV34DecContext *r)
essential slice information
Libavcodec external API header.
static void rv30_loop_filter(RV34DecContext *r, int row)
ptrdiff_t linesize
line size, in bytes, may be different from width
main external API structure.
int height
Picture size; must be a multiple of 16.
static void rv30_weak_loop_filter(uint8_t *src, const int step, const int stride, const int lim)
P-frame macroblock, 8x8 motion compensation partitions.
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits1(GetBitContext *s)
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt)
int intra_types_stride
block types array stride
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static enum AVPixelFormat pix_fmts[]
int(* decode_intra_types)(struct RV34DecContext *r, GetBitContext *gb, int8_t *dst)
P-frame macroblock, one motion vector.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
struct AVCodecContext * avctx
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11.
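This padded stride is what lets the loop filter excerpts above reach left and top neighbours with plain offsets; an illustrative fragment (names chosen only for illustration):

    /* With mb_stride = mb_width + 1, the extra column keeps a left-neighbour
     * lookup at the start of a row inside the array; top lookups are still
     * guarded by the row checks seen in the excerpts. */
    int mb_pos  = mb_y * mb_stride + mb_x;
    int left_mb = mb_pos - 1;           /* e.g. deblock_coefs[mb_pos - 1] above */
    int top_mb  = mb_pos - mb_stride;   /* macroblock directly above            */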
B-frame macroblock, backward prediction.
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
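Decoding the slice start position requires knowing how many bits the macroblock index occupies; assuming it is coded with just enough bits to cover every macroblock, that width is ceil(log2(mb_size)) for mb_size > 1. An illustrative helper (not the decoder's own code):

    #include "libavutil/common.h"   /* av_log2() */

    /* Illustrative only: bits needed to code an index in [0, mb_count), mb_count > 1. */
    static int mb_index_bits(int mb_count)
    {
        return av_log2(mb_count - 1) + 1;   /* e.g. 99 MBs -> av_log2(98) + 1 = 7 bits */
    }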
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static int rv30_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
const uint8_t * luma_dc_quant_i
luma subblock DC quantizer for intraframes
uint32_t * mb_type
types and macros are defined in mpegutils.h
int type
slice type (intra, inter)
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
miscellaneous RV30 tables
int rv30
indicates which RV variant is currently decoded
AVPixelFormat
Pixel format.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
void(* loop_filter)(struct RV34DecContext *r, int row)
uint8_t * cbp_chroma
CBP values for chroma subblocks.