54         for (i = e - 1; i >= 0; i--)
 
   57         e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10)));
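
Line 57 leaves e as either 0 or -1, and the value decoded so far is then sign-folded with an expression of the form (a ^ e) - e. A minimal standalone sketch of that branch-free conditional negation (the helper name is illustrative, not FFmpeg API):

    #include <assert.h>

    /* Branch-free conditional negation: with neg == 0 the value passes through
     * unchanged, with neg == -1 it is two's-complement negated, because
     * (a ^ -1) - (-1) == ~a + 1 == -a. */
    static int cond_negate(int a, int neg)
    {
        return (a ^ neg) - neg;
    }

    int main(void)
    {
        assert(cond_negate(5,  0) ==  5);
        assert(cond_negate(5, -1) == -5);
        return 0;
    }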
 
   74     while (i < state->error_sum) { 
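
The loop at line 74 belongs to the Golomb-Rice reader: it doubles a copy of state->count until it reaches the accumulated error magnitude, which selects k roughly equal to ceil(log2(error_sum / count)). A small sketch of that selection with illustrative names (the real decoder keeps these counters in VlcState):

    /* Pick the Rice parameter k so that (count << k) >= error_sum,
     * i.e. roughly k = ceil(log2(error_sum / count)). */
    static int pick_rice_k(unsigned count, unsigned error_sum)
    {
        unsigned i = count;
        int k = 0;
        while (i < error_sum) {
            k++;
            i += i;            /* i = count << k */
        }
        return k;
    }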
 
   80     ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
 
   84     if (k == 0 && 2 * state->drift <= -state->count)
 
   87     v ^= ((2 * state->drift + state->count) >> 31);
 
   99                                          int plane_index, int bits)
 
  110         for (x = 0; x < w; x++) {
 
  112             for (i=0; i<bits; i++) {
 
  121     for (x = 0; x < w; x++) {
 
  122         int diff, context, sign;
 
  124         context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
 
  136             if (context == 0 && run_mode == 0)
 
  140                 if (run_count == 0 && run_mode == 1) {
 
  143                         if (x + run_count <= w)
 
  168             ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
 
  175         sample[1][x] = av_mod_uintp2(predict(sample[1] + x, sample[0] + x) + diff, bits);
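
Line 175 reconstructs a sample as (prediction + residual) reduced modulo 2^bits; av_mod_uintp2(x, p) keeps only the low p bits of x. A minimal sketch of that reduction (the helper name here is illustrative, not the libavutil implementation, and it assumes bits < 32):

    /* Keep only the low `bits` bits, i.e. reduce modulo 2^bits. */
    static unsigned mod_pow2(unsigned x, unsigned bits)
    {
        return x & ((1U << bits) - 1);
    }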
 
  181                          int w, int h, int stride, int plane_index)
 
  192     for (y = 0; y < h; y++) {
 
  193         int16_t *temp = sample[0];
 
  195         sample[0] = sample[1];
 
  198         sample[1][-1] = sample[0][0];
 
  199         sample[0][w]  = sample[0][w - 1];
 
  204             for (x = 0; x < w; x++)
 
  205                 src[x + stride * y] = sample[1][x];
 
  209                 for (x = 0; x < w; x++) {
 
  210                     ((uint16_t*)(src + stride*y))[x] = sample[1][x];
 
  213                 for (x = 0; x < w; x++) {
 
  230     for (x = 0; x < 4; x++) {
 
  239     for (y = 0; y < h; y++) {
 
  241             int16_t *temp = sample[p][0];
 
  243             sample[p][0] = sample[p][1];
 
  246             sample[p][1][-1]= sample[p][0][0  ];
 
  247             sample[p][0][ w]= sample[p][0][w-1];
 
  253         for (x = 0; x < w; x++) {
 
  254             int g = sample[0][1][x];
 
  255             int b = sample[1][1][x];
 
  256             int r = sample[2][1][x];
 
  257             int a = sample[3][1][x];
 
  268                 *((uint32_t*)(src[0] + x*4 + stride[0]*y)) = b + (g<<8) + (r<<16) + (a<<24);
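
Line 268 packs the four planar samples into a single 32-bit pixel, B in the low byte, then G, R, A (on a little-endian host this lays the bytes out as B, G, R, A in memory). The same packing as a standalone sketch (the function name is illustrative):

    #include <stdint.h>

    /* Pack 8-bit B, G, R, A samples into one 32-bit word, B in the low byte. */
    static uint32_t pack_bgra(uint8_t b, uint8_t g, uint8_t r, uint8_t a)
    {
        return (uint32_t)b | ((uint32_t)g << 8) |
               ((uint32_t)r << 16) | ((uint32_t)a << 24);
    }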
 
  270                 *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b;
 
  271                 *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g;
 
  272                 *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r;
 
  283     memset(state, 128, sizeof(state));
 
  324     } else if (ps == 2) {
 
  327     } else if (ps == 3) {
 
  387             memcpy(pdst, psrc, sizeof(*pdst));
 
  432     if (f->colorspace == 0) {
 
  435         const int cx            = x >> f->chroma_h_shift;
 
  436         const int cy            = y >> f->chroma_v_shift;
 
  439         if (f->chroma_planes) {
 
  440             decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
 
  441             decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
 
  443         if (fs->transparency)
 
  444             decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 1 : 2);
 
  446         uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
 
  447                                p->data[1] + ps * x + y * p->linesize[1],
 
  448                                p->data[2] + ps * x + y * p->linesize[2] };
 
  451     if (fs->ac && f->version > 2) {
 
  454         v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
 
  457             fs->slice_damaged = 1;
 
  474     memset(state, 128, sizeof(state));
 
  476     for (v = 0; i < 128; v++) {
 
  479         if (len > 128 - i || !len)
 
  483             quant_table[i] = scale * v;
 
  488     for (i = 1; i < 128; i++)
 
  489         quant_table[256 - i] = -quant_table[i];
 
  490     quant_table[128] = -quant_table[127];
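
Lines 488-490 make the quantization table antisymmetric: entry 256 - i holds the negative of entry i, and entry 128 mirrors entry 127. That layout exists because the context model indexes the table with a neighbour difference masked to 8 bits, so negative differences wrap into the upper half. A sketch of such a lookup under that assumption (the helper name is illustrative):

    /* Quantize a signed neighbour difference through a 256-entry table that is
     * antisymmetric around index 128: (diff & 0xFF) maps negative differences
     * into the upper half, which holds the negated values. */
    static int quantize_diff(const int16_t quant_table[256], int diff)
    {
        return quant_table[diff & 0xFF];
    }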
 
  501     for (i = 0; i < 5; i++) {
 
  503         if (context_count > 32768U) {
 
  507     return (context_count + 1) / 2;
 
  518     memset(state2, 128, sizeof(state2));
 
  519     memset(state, 128, sizeof(state));
 
  537         for (i = 1; i < 256; i++)
 
  607                "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
 
  629     memset(state, 128, sizeof(state));
 
  641             for (i = 1; i < 256; i++)
 
  647         chroma_planes       = get_rac(c, state);
 
  650         transparency        = get_rac(c, state);
 
  666         if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
 
  668                    chroma_h_shift, chroma_v_shift);
 
  747                    "chroma subsampling not supported in this colorspace\n");
 
  774         if (context_count < 0) {
 
  785             int trailer = 3 + 5*!!f->ec;
 
  829                            "quant_table_index out of range\n");
 
  875     int buf_size        = avpkt->size;
 
  910                    "Cannot decode non-keyframe without valid keyframe\n");
 
  925     buf_p = buf + buf_size;
 
  928         int trailer = 3 + 5*!!f->ec;
 
  981             for (j = 0; j < 4; j++) {
 
 1045     fsdst->ac                  = fsrc->ac;
 
 1048     fsdst->ec                  = fsrc->ec;
 
 1080         memcpy(fdst, fsrc, sizeof(*fdst));
 
static av_always_inline int fold(int diff, int bits)
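
fold() reduces a prediction residual into the signed range representable in `bits` bits. A sketch of that wrap, assuming the usual two's-complement fold around zero and bits < 32 (an illustration, not the ffv1.h body):

    /* Wrap diff into [-(1 << (bits - 1)), (1 << (bits - 1)) - 1]. */
    static int fold_residual(int diff, int bits)
    {
        diff += 1 << (bits - 1);
        diff &= (1 << bits) - 1;
        diff -= 1 << (bits - 1);
        return diff;
    }
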
const uint8_t ff_log2_run[41]
#define AV_PIX_FMT_YUVA422P16
#define AVERROR_INVALIDDATA
Invalid data found when processing input. 
#define AV_PIX_FMT_YUVA422P9
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
This structure describes decoded (raw) audio or video data. 
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA422P10
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) 
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits. 
#define AV_LOG_WARNING
Something somehow does not look correct. 
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
static av_cold int init(AVCodecContext *avctx)
static int decode_slice(AVCodecContext *c, void *arg)
#define MAX_CONTEXT_INPUTS
#define AV_PIX_FMT_GBRP10
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx. 
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format. 
FF Video Codec 1 (a lossless codec) 
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures. 
#define av_assert0(cond)
assert() equivalent, that is always enabled. 
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) 
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed. 
static int get_rac(RangeCoder *c, uint8_t *const state)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values. 
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code. 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame. 
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables. 
#define AV_PIX_FMT_YUVA420P9
uint8_t (*initial_states[MAX_QUANT_TABLES])[32]
av_cold int ff_ffv1_close(AVCodecContext *avctx)
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid. 
static double av_q2d(AVRational a)
Convert rational to double. 
static int get_bits_count(const GetBitContext *s)
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
bitstream reader API header. 
#define AV_PIX_FMT_YUV444P16
static void decode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3])
int interlaced_frame
The content of the picture is interlaced. 
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to start decoding the next frame.
#define AV_PIX_FMT_YUVA420P16
high precision timer, useful to profile code 
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are. 
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) 
static int init_thread_copy(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered. 
static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs. 
int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256]
int skip_alpha
Skip processing alpha if supported by codec. 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers. 
#define AV_PIX_FMT_YUVA444P16
simple assert() macros that are a bit more flexible than ISO C assert(). 
const char * name
Name of the codec implementation. 
#define AV_PIX_FMT_YUV444P10
int ff_ffv1_allocate_initial_states(FFV1Context *f)
static const uint8_t offset[127][2]
Libavcodec external API header. 
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading. 
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data. 
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) 
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified. 
int ac
1=range coder <-> 0=golomb rice 
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
#define AV_PIX_FMT_YUV422P9
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
uint8_t state_transition[256]
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
enum AVPictureType pict_type
Picture type of the frame. 
#define AV_PIX_FMT_GRAY16
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code. 
#define FF_CEIL_RSHIFT(a, b)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready. 
static int get_context(PlaneContext *p, int16_t *src, int16_t *last, int16_t *last2)
static void update_vlc_state(VlcState *const state, const int v)
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block. 
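
av_crc_get_table() and av_crc() are the pair used for the header and slice integrity checks. A minimal usage sketch; the wrapper name is illustrative, and AV_CRC_32_IEEE with an initial value of 0 is assumed here:

    #include "libavutil/crc.h"

    /* CRC-32 (IEEE polynomial) of a byte buffer, starting from 0. */
    static uint32_t crc32_of(const uint8_t *buf, size_t len)
    {
        const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE);
        return av_crc(table, 0, buf, len);
    }
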
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
av_cold int ff_ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_YUVA444P10
int ac_byte_count
number of bytes used for AC coding 
static av_always_inline void decode_line(FFV1Context *s, int w, int16_t *sample[2], int plane_index, int bits)
#define AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_GBRP14
static int read_header(FFV1Context *f)
static const float pred[4]
void * av_memdup(const void *p, size_t size)
Duplicate the buffer p. 
#define AV_PIX_FMT_YUV420P16
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading. 
int context_count[MAX_QUANT_TABLES]
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line. 
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) 
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs. 
main external API structure. 
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. 
static unsigned int get_bits1(GetBitContext *s)
#define AV_PIX_FMT_YUV420P10
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) 
rational number numerator/denominator 
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext. 
uint16_t step_minus1
Number of elements between 2 horizontally consecutive pixels minus 1. 
#define AV_PIX_FMT_YUV420P9
int allocate_progress
Whether to allocate progress for frame threading. 
#define AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_YUV422P10
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes. 
#define FF_DEBUG_PICT_INFO
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table. 
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) 
common internal api header. 
void ff_ffv1_clear_slice_state(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_YUVA444P9
uint8_t(* state)[CONTEXT_SIZE]
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) 
uint8_t * bytestream_start
static av_cold int decode_init(AVCodecContext *avctx)
PlaneContext plane[MAX_PLANES]
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things. 
struct FFV1Context * fsrc
int top_field_first
If the content is interlaced, is top field displayed first. 
struct AVCodecInternal * internal
Private context used for internal data. 
int key_frame
1 -> keyframe, 0-> not 
struct FFV1Context * slice_context[MAX_SLICES]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) 
enum AVFieldOrder field_order
Field order. 
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
enum AVColorSpace colorspace
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1). 
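
get_sr_golomb() reads an unsigned Golomb-Rice code and maps it back to a signed value; the mapping is typically the zigzag fold (0, 1, 2, 3, ... to 0, -1, 1, -2, ...). A standalone sketch of that fold (the function name is illustrative):

    /* Zigzag decode: 0,1,2,3,4,... -> 0,-1,1,-2,2,... */
    static int zigzag_to_signed(unsigned v)
    {
        return (int)(v >> 1) ^ -(int)(v & 1);
    }
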
This structure stores compressed data. 
static int read_extra_header(FFV1Context *f)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later. 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators. 
#define AV_PIX_FMT_YUV422P16
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_NOPTS_VALUE
Undefined timestamp value. 
#define AV_PIX_FMT_0RGB32