#define BITSTREAM_READER_LE

/* Base quantisation tables; the values are the standard JPEG luma/chroma tables. */
static const uint8_t unscaled_luma[64] = {
    16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19,
    26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56,
    14, 17, 22, 29, 51, 87, 80, 62, 18, 22, 37, 56,
    68,109,103, 77, 24, 35, 55, 64, 81,104,113, 92,
    49, 64, 78, 87,103,121,120,101, 72, 92, 95, 98,
   112,100,103, 99
};

static const uint8_t unscaled_chroma[64] = {
    17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66,
    99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99
};

/* Per-frame quantisation matrices held in the decoder context (AGMContext). */
int luma_quant_matrix[64];
int chroma_quant_matrix[64];
/* read_code() – fragments */
int len = 0, skip = 0, max;
/* ... */
len = ((show_bits(gb, 5) & 0x10) | 0xA0) >> 4;
/* ... */
max = 1 << (len - 1);
/* ... */
} else if (mode == 0) {
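/*
 * The escape above derives the code length from bit 4 of the peeked 5-bit
 * value, so it can only yield 10 or 11. A standalone arithmetic check of that
 * expression; the helper name escape_len is made up for illustration.
 */
#include <assert.h>

static int escape_len(unsigned peek5)
{
    return ((peek5 & 0x10) | 0xA0) >> 4;   /* 0xA0 >> 4 == 10, 0xB0 >> 4 == 11 */
}

int main(void)
{
    assert(escape_len(0x0F) == 10);  /* bit 4 clear */
    assert(escape_len(0x10) == 11);  /* bit 4 set   */
    return 0;
}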
/* decode_intra_blocks() – fragments */
static int decode_intra_blocks(AGMContext *s, GetBitContext *gb,
                               const int *quant_matrix, int *skip, int *dc_level)
{
    /* ... */
    for (int i = 0; i < 64; i++) {
        /* ... */
        for (int k = 0; k < rskip; k++)
            block[64 * k] = *dc_level * quant_matrix[0];
        /* ... */
        block[0] = (i == 0 ? *dc_level : level) * quant_matrix[i];
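/*
 * The 8x8 blocks of a row are laid out back to back here, so block[64 * k]
 * addresses the DC slot of the k-th block: a run of rskip DC-only blocks is
 * expanded by repeating the predicted DC scaled by the DC quantiser step.
 * A minimal sketch of that expansion; fill_dc_run and its parameter names are
 * assumptions, not names from agm.c.
 */
#include <stdint.h>

static void fill_dc_run(int16_t *blocks, int rskip, int dc_level, int quant_dc)
{
    for (int k = 0; k < rskip; k++)
        blocks[64 * k] = dc_level * quant_dc;   /* only the DC slot is written */
}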
/* decode_inter_blocks() – fragments */
static int decode_inter_blocks(AGMContext *s, GetBitContext *gb,
                               const int *quant_matrix, int *skip, int *map)
{
    /* ... */
    for (int i = 0; i < 64; i++) {
        /* ... */
        block[0] = level * quant_matrix[i];
/* decode_intra_block() – fragments */
static int decode_intra_block(AGMContext *s, GetBitContext *gb,
                              const int *quant_matrix, int *skip, int *dc_level)
{
    /* ... */
    memset(block, 0, sizeof(s->block));
    /* ... */
    block[scantable[0]] = offset + *dc_level * quant_matrix[0];
    /* ... */
    for (int i = 1; i < 64;) {
        /* ... */
        rskip = FFMIN(*skip, 64 - i);
        /* ... */
        block[scantable[i]] = level * quant_matrix[i];
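/*
 * decode_intra_block() dequantises into scan order: coefficient i of the coded
 * (zig-zag) stream is scaled by quant_matrix[i] and stored at scantable[i],
 * its raster position for the IDCT; the DC entry additionally carries the
 * running predictor plus an offset. A reduced sketch of that placement; the
 * function and parameter names are assumed. The scan table would typically be
 * derived from ff_zigzag_direct (see the symbol list at the end).
 */
#include <stdint.h>

static void put_coeff(int16_t block[64], const uint8_t scantable[64],
                      const int quant_matrix[64], int i, int level)
{
    block[scantable[i]] = level * quant_matrix[i];
}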
/* decode_intra_plane() – fragment */
int ret, skip = 0, dc_level = 0;
/* decode_inter_block() – fragments */
static int decode_inter_block(AGMContext *s, GetBitContext *gb,
                              const int *quant_matrix, int *skip, int *map)
{
    /* ... */
    memset(block, 0, sizeof(s->block));
    /* ... */
    for (int i = 0; i < 64;) {
        /* ... */
        rskip = FFMIN(*skip, 64 - i);
        /* ... */
        block[scantable[i]] = level * quant_matrix[i];
/* decode_inter_plane() – motion-compensation fragments */
int shift = plane == 0;
/* ... */
if (orig_mv_x >= -32) {
    if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
        x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
    /* ... */
    for (int i = 0; i < 64; i++)
    /* ... */
} else if (s->flags & 2) {
    int shift = plane == 0;
    /* ... */
    if (orig_mv_x >= -32) {
        if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
            x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
        /* ... */
        for (int i = 0; i < 64; i++)
    /* ... */
} else if (s->flags & 1) {
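/*
 * Before an 8x8 block is motion-compensated, its source position in the
 * previous frame is checked against the plane bounds; a valid block can then
 * be copied with copy_block8() (signature in the symbol list). A condensed
 * sketch of that guard-and-copy step, using assumed variable roles rather
 * than the decoder's exact addressing.
 */
if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
    x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
    return AVERROR_INVALIDDATA;

copy_block8(frame->data[plane] + y * 8 * frame->linesize[plane] + x * 8,
            prev->data[plane] + (y * 8 + mv_y) * prev->linesize[plane] + x * 8 + mv_x,
            frame->linesize[plane], prev->linesize[plane], 8);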
/* compute_quant_matrix() – fragments */
double f = 1.0 - fabs(qscale);
/* ... */
for (int i = 0; i < 64; i++) {
    luma[i]   = FFMAX(1, 16 * f);
    chroma[i] = FFMAX(1, 16 * f);
}
/* ... */
for (int i = 0; i < 64; i++) {
    luma[i]   = FFMAX(1, 16 - qscale * 32);
    chroma[i] = FFMAX(1, 16 - qscale * 32);
}
/* ... */
for (int i = 0; i < 64; i++) {
/* ... */
for (int i = 0; i < 64; i++) {
/* ... */
for (int i = 0; i < 64; i++) {
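/*
 * The two loops above are the flat-matrix paths of compute_quant_matrix():
 * with f = 1.0 - fabs(qscale), every coefficient gets the same step, 16 * f
 * on one path and 16 - qscale * 32 on the other (presumably selected by the
 * sign of qscale), clamped to at least 1. A standalone sketch of just that
 * mapping; flat_quant_step and MAX2 are illustrative names.
 */
#include <math.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))   /* stand-in for FFMAX */

static int flat_quant_step(double qscale)
{
    double f = 1.0 - fabs(qscale);

    if (qscale >= 0.0)
        return MAX2(1, (int)(16 * f));            /*  0.0 -> 16,  1.0 ->  1 */
    return MAX2(1, (int)(16 - qscale * 32));      /* -0.5 -> 32, -1.0 -> 48 */
}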
/* decode_raw_intra_rgb() – fragments */
for (int y = 0; y < avctx->height; y++) {
    for (int x = 0; x < avctx->width; x++) {
        dst[x*3+0] = bytestream2_get_byteu(gbyte) + r;
        /* ... */
        dst[x*3+1] = bytestream2_get_byteu(gbyte) + g;
        /* ... */
        dst[x*3+2] = bytestream2_get_byteu(gbyte) + b;
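/*
 * The raw RGB path is left-predicted per channel: each byte from the stream
 * is a delta added to the previously reconstructed sample of the same channel
 * (the r, g, b accumulators). A self-contained sketch of the same scheme for
 * a single channel; the function name is an assumption.
 */
#include <stddef.h>
#include <stdint.h>

static void decode_left_predicted(uint8_t *dst, const uint8_t *deltas,
                                  size_t n, uint8_t prev)
{
    for (size_t x = 0; x < n; x++) {
        dst[x] = (uint8_t)(deltas[x] + prev);   /* wrapping 8-bit delta */
        prev   = dst[x];
    }
}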
/* fill_pixels() – fragments */
static av_always_inline int fill_pixels(uint8_t **y0, uint8_t **y1, uint8_t **u, uint8_t **v,
                                        int ylinesize, int ulinesize, int vlinesize,
                                        uint8_t *fill,
                                        int *nx, int *ny, int *np, int w, int h)
{
    int x = *nx, y = *ny, pos = *np;
    /* ... */
    y0dst[2*x+0] += fill[0];
    y0dst[2*x+1] += fill[1];
    y1dst[2*x+0] += fill[2];
    y1dst[2*x+1] += fill[3];
    /* ... */
    } else if (pos == 1) {
        /* ... */
        y0dst -= 2*ylinesize;
        y1dst -= 2*ylinesize;
        /* ... */
        y0dst[2*x+0] += fill[2];
        y0dst[2*x+1] += fill[3];
        /* ... */
    } else if (pos == 2) {
        y1dst[2*x+0] += fill[0];
        y1dst[2*x+1] += fill[1];
        /* ... */
        y0dst -= 2*ylinesize;
        y1dst -= 2*ylinesize;
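/*
 * One 4-byte fill unit covers a 2x2 luma patch (two samples on each of two
 * adjacent luma lines), which lines up with the planar 4:2:0 layout referenced
 * in the symbol list, where each chroma sample spans 2x2 luma samples; pos
 * tracks how much of a patch is still pending when a run ends mid-pattern.
 * A simplified sketch of the complete-patch case; the function and pointer
 * names are assumptions.
 */
#include <stdint.h>

static void add_luma_patch(uint8_t *y0dst, uint8_t *y1dst,
                           const uint8_t fill[4], int x)
{
    y0dst[2 * x + 0] += fill[0];
    y0dst[2 * x + 1] += fill[1];
    y1dst[2 * x + 0] += fill[2];
    y1dst[2 * x + 1] += fill[3];
}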
/* decode_runlen_rgb() – fragments */
int runlen, y = 0, x = 0;
/* ... */
code   = bytestream2_peek_le32(gbyte);
runlen = code & 0xFFFFFF;

if (code >> 24 == 0x77) {
    /* ... */
    for (int i = 0; i < 4; i++)
        fill[i] = bytestream2_get_byte(gbyte);
    /* ... */
    for (int i = 0; i < 4; i++) {
        /* ... */
        if (x >= frame->width * 3) {
/* ... */
for (int i = 0; i < 4; i++)
    fill[i] = bytestream2_get_byte(gbyte);

for (int i = 0; i < 4; i++) {
    /* ... */
    if (x >= frame->width * 3) {
/* decode_runlen() – fragments */
int runlen, y = 0, x = 0, pos = 0;
/* ... */
code   = bytestream2_peek_le32(gbyte);
runlen = code & 0xFFFFFF;

if (code >> 24 == 0x77) {
    /* ... */
    for (int i = 0; i < 4; i++)
        fill[i] = bytestream2_get_byte(gbyte);
/* ... */
for (int i = 0; i < 4; i++)
    fill[i] = bytestream2_get_byte(gbyte);
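/*
 * Both run-length decoders peek a 32-bit little-endian word and split it the
 * same way: the low 24 bits are the run length and the top byte is the opcode,
 * with 0x77 introducing a run that reuses a 4-byte fill pattern read right
 * after the code word. A standalone sketch of that split; the helper name is
 * an assumption.
 */
#include <stdint.h>

static void split_runlen_code(uint32_t code, unsigned *opcode, unsigned *runlen)
{
    *runlen = code & 0xFFFFFF;   /* low 24 bits: run length             */
    *opcode = code >> 24;        /* top byte: 0x77 selects a "fill" run */
}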
/* decode_raw_intra() – fragments */
uint8_t ly0 = 0, ly1 = 0, ly2 = 0, ly3 = 0, lu = 0, lv = 0;
/* ... */
for (int x = 0; x < avctx->width / 2; x++) {
    y0dst[x*2+0] = bytestream2_get_byte(gbyte) + ly0;
    /* ... */
    y0dst[x*2+1] = bytestream2_get_byte(gbyte) + ly1;
    /* ... */
    y1dst[x*2+0] = bytestream2_get_byte(gbyte) + ly2;
    /* ... */
    y1dst[x*2+1] = bytestream2_get_byte(gbyte) + ly3;
    /* ... */
    udst[x] = bytestream2_get_byte(gbyte) + lu;
    /* ... */
    vdst[x] = bytestream2_get_byte(gbyte) + lv;
/* decode_motion_vectors() – fragments */
for (int i = 0; i < nb_mvs; i++) {
/* ... */
for (int i = 0; i < nb_mvs; i++) {
/* get_tree_codes() – fragments */
if (idx < 256 && idx >= 0) {
    /* ... */
} else if (idx >= 0) {
    get_tree_codes(codes, nodes, nodes[idx].child[0], pfx + (0 << bitpos), bitpos + 1);
    get_tree_codes(codes, nodes, nodes[idx].child[1], pfx + (1U << bitpos), bitpos + 1);
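/*
 * get_tree_codes() assembles each leaf's code LSB-first: taking child 0 leaves
 * bit `bitpos` clear, taking child 1 sets it, so the prefix grows from the low
 * end, consistent with the BITSTREAM_READER_LE reader selected at the top of
 * the file. A tiny standalone check of that accumulation for a made-up path.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Walk the (hypothetical) path child 1, child 0, child 1 from the root. */
    uint32_t pfx = 0;
    pfx += 1U << 0;          /* child 1 at bitpos 0 */
    pfx += 0U << 1;          /* child 0 at bitpos 1 */
    pfx += 1U << 2;          /* child 1 at bitpos 2 */
    assert(pfx == 0x5);      /* bits read LSB-first: 1, 0, 1 */
    return 0;
}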
/* make_new_tree() – fragments */
int zlcount = 0, curlen, idx, nindex, last, llast;
int blcounts[32] = { 0 };
/* ... */
for (int i = 0; i < 256; i++) {
    int bitlen = bitlens[i];
    int blcount = blcounts[bitlen];

    zlcount += bitlen < 1;
    syms[(bitlen << 8) + blcount] = i;
/* ... */
for (int i = 0; i < 512; i++) {
/* ... */
for (int i = 0; i < 256; i++) {
    node_idx[i] = 257 + i;
/* ... */
for (curlen = 1; curlen < 32; curlen++) {
    if (blcounts[curlen] > 0) {
        int max_zlcount = zlcount + blcounts[curlen];

        for (int i = 0; zlcount < 256 && zlcount < max_zlcount; zlcount++, i++) {
            int p  = node_idx[nindex - 1 + 512];
            int ch = syms[256 * curlen + i];
            /* ... */
            if (nodes[p].child[0] == -1) {
/* ... */
p = node_idx[nindex - 1 + 512];

if (nodes[p].child[0] == -1) {
/* ... */
for (int i = 0; i < idx; i++)
    node_idx[512 + i] = old_idx[i];
/* build_huff() – fragments */
uint32_t new_codes[256];
/* ... */
uint32_t codes[256];
/* ... */
for (int i = 0; i < 256; i++) {
    /* ... */
    bits[nb_codes]    = bitlen[i];
    codes[nb_codes]   = new_codes[i];
    symbols[nb_codes] = i;
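/*
 * build_huff() gathers, for every symbol with a usable code, parallel arrays
 * of lengths, codes and symbols and feeds them to ff_init_vlc_sparse() (full
 * signature in the symbol list below). A hedged sketch of that final call,
 * assuming uint8_t lengths/symbols, uint32_t codes and an illustrative table
 * depth; the exact nb_bits and flags used by agm.c may differ.
 */
return ff_init_vlc_sparse(vlc, 9, nb_codes,
                          bits,    1, 1,   /* per-entry code length  */
                          codes,   4, 4,   /* per-entry LSB-first code */
                          symbols, 1, 1,   /* per-entry decoded symbol */
                          INIT_VLC_LE);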
/* decode_huffman2() – fragments */
for (int i = 0; i < 256; i++) {
/* ... */
for (int i = 0; i < 256; i++)
/* decode_frame() – fragments */
unsigned compressed_size;
/* ... */
header    = bytestream2_get_le32(gbyte);
s->fflags = bytestream2_get_le32(gbyte);
/* ... */
} else if (!s->dct) {
    /* ... */
    w = bytestream2_get_le32(gbyte);
    h = bytestream2_get_le32(gbyte);
    if (w == INT32_MIN || h == INT32_MIN)
    /* ... */
    width = avctx->width;
    /* ... */
    if (w < width || h < height || w & 7 || h & 7)
/* ... */
for (int i = 0; i < 3; i++)
    s->size[i] = bytestream2_get_le32(gbyte);
/* ... */
compressed_size = avpkt->size;
/* ... */
if (s->size[0] < 0 || s->size[1] < 0 || s->size[2] < 0 ||
    skip + s->size[0] + s->size[1] + s->size[2] > compressed_size) {
/* ... */
else if (!s->dct && s->rgb)
/* ... */
if (!(s->flags & 2)) {
/* ... */
} else if (!s->dct && !s->rgb) {
/* ... */
if (!s->rgb && !s->dct) {
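/*
 * decode_frame() starts by reading a small little-endian header with
 * bytestream2_get_le32(): a header word, the frame flags, optionally new coded
 * dimensions (which must be at least the configured width/height and multiples
 * of 8), and the three per-plane payload sizes, which are validated against
 * the packet before any plane is decoded. A condensed sketch of that
 * read-and-validate order, reusing the names from the fragments above
 * (not a verbatim copy of the function).
 */
header    = bytestream2_get_le32(gbyte);        /* stream header word      */
s->fflags = bytestream2_get_le32(gbyte);        /* per-frame flags         */
/* ... optional new coded dimensions ... */
for (int i = 0; i < 3; i++)
    s->size[i] = bytestream2_get_le32(gbyte);   /* per-plane payload sizes */

if (s->size[0] < 0 || s->size[1] < 0 || s->size[2] < 0 ||
    skip + s->size[0] + s->size[1] + s->size[2] > compressed_size)
    return AVERROR_INVALIDDATA;                 /* payloads must fit packet */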
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static int decode_runlen(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int shift(int a, int b)
AVFrame
This structure describes decoded (raw) audio or video data.
static void flush(AVCodecContext *avctx)
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
static int build_huff(const uint8_t *bitlen, VLC *vlc)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static av_cold int init(AVCodecContext *avctx)
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
static int read_code(GetBitContext *gb, int *oskip, int *level, int *map, int mode)
static int decode_runlen_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
static int decode_huffman2(AVCodecContext *avctx, int header, int size)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int chroma_quant_matrix[64]
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
static void copy_block8(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
static int make_new_tree(const uint8_t *bitlens, uint32_t *codes)
static int decode_inter(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame, AVFrame *prev)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
static av_always_inline int fill_pixels(uint8_t **y0, uint8_t **y1, uint8_t **u, uint8_t **v, int ylinesize, int ulinesize, int vlinesize, uint8_t *fill, int *nx, int *ny, int *np, int w, int h)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
static int get_bits_count(const GetBitContext *s)
get_bits.h
bitstream reader API header.
static int decode_inter_blocks(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *map)
static av_cold int decode_init(AVCodecContext *avctx)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static int get_bits_left(GetBitContext *gb)
static int decode_intra_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, int plane)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int decode_raw_intra_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int decode_raw_intra(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
const char * name
Name of the codec implementation.
unsigned padded_output_size
static av_cold int decode_close(AVCodecContext *avctx)
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
int flags
A combination of AV_PKT_FLAG values.
enum AVPictureType pict_type
Picture type of the frame.
static int decode_intra_blocks(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *dc_level)
int width
picture width / height.
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
void(* idct_add)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, AVFrame *prev, int plane)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
uint8_t idct_permutation[64]
IDCT input permutation.
AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
static void get_tree_codes(uint32_t *codes, Node *nodes, int idx, uint32_t pfx, int bitpos)
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
avcodec.h
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
AVCodecContext
main external API structure.
static const uint8_t unscaled_luma[64]
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
AVClass
Describe the class of an AVClass context structure.
static void skip_bits(GetBitContext *s, int n)
static int decode_motion_vectors(AVCodecContext *avctx, GetBitContext *gb)
static const uint8_t unscaled_chroma[64]
const uint8_t ff_zigzag_direct[64]
static void compute_quant_matrix(AGMContext *s, double qscale)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
internal.h
common internal api header.
static int decode_inter_block(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *map)
static int decode_intra_block(AGMContext *s, GetBitContext *gb, const int *quant_matrix, int *skip, int *dc_level)
int luma_quant_matrix[64]
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
VLC_TYPE(* table)[2]
code, bits
int key_frame
1 -> keyframe, 0-> not
static const uint8_t * align_get_bits(GetBitContext *s)
static void decode_flush(AVCodecContext *avctx)
void(* idct)(int16_t *block)
#define MKTAG(a, b, c, d)
AVPacket
This structure stores compressed data.
static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
void ff_free_vlc(VLC *vlc)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
void(* add_pixels_clamped)(const int16_t *block, uint8_t *av_restrict pixels, ptrdiff_t line_size)