#include <stdatomic.h>
#define VP8_MAX_QUANT 127
#define EDGE_EMU_LINESIZE 32
#define MAX_THREADS 8
uint8_t intra4x4_pred_mode_mb[16]
int coeff_partition_size[8]
AVPixelFormat
Pixel format.
uint8_t * intra4x4_pred_mode_top
VP8Macroblock * macroblocks_base
uint8_t(* top_border)[16+8+8]
struct VP8Context::@171 filter
VP8ThreadData * thread_data
This structure describes decoded (raw) audio or video data.
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
vp8_mc_func put_pixels_tab[3][3][3]
@ VP8_SPLITMVMODE_4x4
4x4 blocks of 4x4px each
uint8_t enabled
whether each mb can have a different strength based on mode/ref
@ VP8_SPLITMVMODE_8x8
2x2 blocks of 8x8px each
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macroblock.
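The cached value maps directly onto how a block's residual is handled; a minimal sketch of that mapping, assuming the index-plus-one convention described above (the helper name and enum are placeholders, not FFmpeg API):

#include <stdint.h>

/* Hypothetical helper: classify a block from its non_zero_count_cache
 * entry, i.e. the index plus one of its last non-zero coefficient. */
enum block_kind { BLOCK_SKIP, BLOCK_DC_ONLY, BLOCK_FULL };

static enum block_kind classify_block(uint8_t nnz)
{
    if (nnz == 0) return BLOCK_SKIP;     /* no coefficients at all       */
    if (nnz == 1) return BLOCK_DC_ONLY;  /* only the DC coeff was coded  */
    return BLOCK_FULL;                   /* needs the full 4x4 transform */
}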
uint8_t feature_enabled[4]
Macroblock features (VP7)
int ff_vp8_decode_free(AVCodecContext *avctx)
struct VP8Context::@175 coder_state_at_header_end
@ VP8_SPLITMVMODE_16x8
2 16x8 blocks (vertical)
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT, [1] - VP56_FRAME_PREVIOUS, [2] - VP56_FRAME_GOLDEN, [3] - VP56_FRAME_GOLDEN2 (altref)
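A minimal sketch of how such a per-reference delta could be applied to the base filter level (hypothetical helper; the clamp reflects VP8's 0-63 filter-level range):

#include <stdint.h>

/* Hypothetical helper: add the loop-filter delta selected by the
 * reference type (0 = intra/current, 1 = previous, 2 = golden,
 * 3 = altref) and clamp to the legal VP8 range. */
static int adjusted_filter_level(int base_level, const int8_t ref_delta[4],
                                 int ref_type)
{
    int level = base_level + ref_delta[ref_type];
    if (level < 0)  level = 0;
    if (level > 63) level = 63;
    return level;
}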
VP56RangeCoder c
header context, includes mb modes and motion vectors
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
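In VP8 the entropy context for a block's first DCT token is simply how many of the left and above neighbours had non-zero coefficients; a tiny sketch of that derivation (hypothetical helper name):

#include <stdint.h>

/* Hypothetical helper: context 0, 1 or 2 depending on whether the left
 * and above neighbouring blocks contained any non-zero coefficients. */
static int first_token_context(uint8_t left_nnz, uint8_t top_nnz)
{
    return (left_nnz != 0) + (top_nnz != 0);
}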
struct VP8Context::@170 segmentation
Base parameters for segmentation, i.e. per-macroblock adjustments to quantizer and loop-filter strength.
VP8FilterStrength * filter_strength
uint8_t token[4][16][3][NUM_DCT_TOKENS - 1]
@ VP8_SPLITMVMODE_8x16
2 8x16 blocks (horizontal)
struct VP8Context::@173 quant
int ff_vp8_decode_init(AVCodecContext *avctx)
int fade_present
Fade bit present in bitstream (VP7)
int8_t filter_level[4]
base loop filter level
int16_t luma_dc_qmul[2]
luma dc-only block quant
AVBufferRef * hwaccel_priv_buf
VP8Macroblock * macroblocks
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
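For illustration, a trivial function matching this signature: a full-pel 4x4 copy (the name is illustrative; real entries in put_pixels_tab also implement the six-tap and bilinear subpel filters selected by the fractional x/y positions):

#include <stdint.h>
#include <stddef.h>

/* Sketch of a vp8_mc_func-compatible routine: plain 4x4 copy for the
 * case where the fractional offsets x and y are both zero. */
static void put_pixels4_c(uint8_t *dst, ptrdiff_t dstStride,
                          uint8_t *src, ptrdiff_t srcStride,
                          int h, int x, int y)
{
    (void)x; (void)y;               /* unused in the full-pel case */
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 4; j++)
            dst[j] = src[j];
        dst += dstStride;
        src += srcStride;
    }
}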
uint8_t feature_index_prob[4][3]
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded.
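A minimal sketch of the save/restore pattern this implies (struct and names are placeholders, not the decoder's actual fields):

struct frame_probs { int placeholder; /* token/mode/mv probabilities */ };

/* Hypothetical sketch: snapshot the probabilities before the per-frame
 * header updates and restore them afterwards when the bitstream did not
 * ask for the updates to persist. */
static void decode_with_optional_prob_update(struct frame_probs *cur,
                                             int update_probabilities)
{
    struct frame_probs saved = *cur;  /* state before this frame's updates */

    /* ... apply probability updates signalled in the frame header ... */
    /* ... decode the frame using *cur ...                             */

    if (!update_probabilities)
        *cur = saved;                 /* updates apply to this frame only */
}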
uint8_t intra4x4_pred_mode_left[4]
#define EDGE_EMU_LINESIZE
uint8_t colorspace
0 is the only value allowed (meaning bt601)
int mb_layout
This describes the macroblock memory layout.
int update_last
update VP56_FRAME_PREVIOUS with the current one
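Together with update_golden above, this drives the reference refresh after a frame has been decoded; a hypothetical sketch with integer stand-ins for frame buffers:

enum { REF_CURRENT, REF_PREVIOUS, REF_GOLDEN, REF_ALTREF, REF_NONE };

/* Hypothetical sketch: refresh the reference slots once a frame has been
 * decoded. update_golden is either REF_NONE ("do not update") or the slot
 * whose contents should be copied into the golden slot. */
static void refresh_references(int refs[4], int update_last, int update_golden)
{
    if (update_last)
        refs[REF_PREVIOUS] = refs[REF_CURRENT];
    if (update_golden != REF_NONE)
        refs[REF_GOLDEN] = refs[update_golden];
}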
VP56RangeCoder coeff_partition[8]
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
uint8_t update_feature_data
uint8_t edge_emu_buffer[21 * EDGE_EMU_LINESIZE]
#define DECLARE_ALIGNED(n, t, v)
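DECLARE_ALIGNED(n, t, v) declares a variable v of type t with n-byte alignment; the stand-in definition below shows what it boils down to on GCC/Clang only (libavutil's real macro also covers other compilers):

#include <stdint.h>

/* Illustrative stand-in for libavutil's DECLARE_ALIGNED on GCC/Clang. */
#define DECLARE_ALIGNED(n, t, v) t __attribute__((aligned(n))) v

/* 16-byte-aligned scratch buffer, e.g. so SIMD code can rely on aligned
 * loads and stores. */
static DECLARE_ALIGNED(16, uint8_t, scratch)[16 * 16];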
int header_partition_size
VP8Frame * next_framep[4]
uint8_t fullrange
whether we can skip clamping in dsp functions
uint8_t feature_present_prob[4]
struct VP8Context::@174 lf_delta
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
@ VP8_SPLITMVMODE_NONE
(only used in prediction) no split MVs
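Taken together, the split modes listed in this file determine how many motion vectors a macroblock carries; a small sketch of that mapping (enum names abbreviated here, not the real identifiers):

enum splitmv { SPLIT_16x8, SPLIT_8x16, SPLIT_8x8, SPLIT_4x4, SPLIT_NONE };

/* Hypothetical helper: number of MV partitions implied by each split mode. */
static int num_mv_partitions(enum splitmv mode)
{
    switch (mode) {
    case SPLIT_16x8:
    case SPLIT_8x16: return 2;   /* two 16x8 or 8x16 halves     */
    case SPLIT_8x8:  return 4;   /* 2x2 grid of 8x8 blocks      */
    case SPLIT_4x4:  return 16;  /* 4x4 grid of 4x4 blocks      */
    default:         return 1;   /* no split: one MV for the MB */
    }
}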
int8_t sign_bias[4]
one state [0, 1] per ref frame type
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
main external API structure.
uint8_t intra4x4_pred_mode_top[4]
static pthread_mutex_t lock
Context for storing H.264 prediction functions.
A reference to a data buffer.
uint8_t feature_value[4][4]
enum AVPixelFormat pix_fmt
This structure stores compressed data.
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7): [0] VP56_FRAME_PREVIOUS, [1] VP56_FRAME_GOLDEN.
struct VP8Context::@176 prob[2]
These are all of the updatable probabilities for binary decisions.
struct VP8Context::@172 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
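A minimal sketch of the selection this implies (struct and field names are placeholders):

#include <stdint.h>

/* Placeholder quantizer set; the real context keeps separate multipliers
 * for luma, luma-DC-only blocks and chroma. */
struct quant_set { int16_t luma_qmul[2], luma_dc_qmul[2], chroma_qmul[2]; };

/* Hypothetical helper: pick the quantizer set for a macroblock. With
 * segmentation enabled the segment id (0-3) selects one of four sets,
 * otherwise every macroblock uses set 0. */
static const struct quant_set *select_qmat(const struct quant_set qmat[4],
                                           int segmentation_enabled,
                                           int segment_id)
{
    return &qmat[segmentation_enabled ? segment_id : 0];
}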
int(* cond)(enum AVPixelFormat pix_fmt)
void * hwaccel_picture_private