#define EPIC_PIX_STACK_SIZE 1024
#define EPIC_PIX_STACK_MAX  (EPIC_PIX_STACK_SIZE - 1)

static const uint8_t luma_quant[64] = {
     8,  6,  5,  8, 12, 20, 26, 31,
     6,  6,  7, 10, 13, 29, 30, 28,
     7,  7,  8, 12, 20, 29, 35, 28,
     7,  9, 11, 15, 26, 44, 40, 31,
     9, 11, 19, 28, 34, 55, 52, 39,
    12, 18, 28, 32, 41, 52, 57, 46,
    25, 32, 39, 44, 52, 61, 60, 51,
    36, 46, 48, 49, 56, 50, 52, 50
};

static const uint8_t chroma_quant[64] = {
     9,  9, 12, 24, 50, 50, 50, 50,
     9, 11, 13, 33, 50, 50, 50, 50,
    12, 13, 28, 50, 50, 50, 50, 50,
    24, 33, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50
};
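/*
 * A minimal sketch (not taken from the original file) of how zig-zag-ordered
 * quantisation tables like the two above are normally applied: each decoded
 * coefficient at zig-zag position pos is scaled by qmat[pos] before the IDCT.
 * The helper name is hypothetical.
 */
static void dequant_block_sketch(int16_t block[64], int is_chroma)
{
    const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
    int pos;

    for (pos = 0; pos < 64; pos++)
        block[pos] *= qmat[pos];
}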
#define EPIC_HASH_SIZE 256

static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
                             const uint8_t *val_table, int nb_codes,
                             int is_ac)
{
    uint8_t  huff_size[256] = { 0 };
    uint16_t huff_code[256];
    uint16_t huff_sym[256];
    int i;

    ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
    for (i = 0; i < 256; i++)
        huff_sym[i] = i + 16 * is_ac;
    if (is_ac)
        huff_sym[0] = 16 * 256;
    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                              huff_code, 2, 2, huff_sym, 2, 2, 0);
}
    for (i = 0; i < 2; i++) {
    const uint8_t *src_end = src + src_size;

    while (src < src_end) {
        if (x == 0xFF && !*src)

    *dst_size = dst - dst_start;
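/*
 * Self-contained sketch of the byte-unstuffing these fragments perform
 * (standard JPEG convention: a 0x00 that follows a 0xFF data byte is a
 * stuffing byte and is dropped).  Lines not present in the fragment above
 * are reconstructions, not verbatim source.
 */
static void jpg_unescape_sketch(const uint8_t *src, int src_size,
                                uint8_t *dst, int *dst_size)
{
    const uint8_t *src_end   = src + src_size;
    uint8_t       *dst_start = dst;

    while (src < src_end) {
        uint8_t x = *src++;

        *dst++ = x;

        if (x == 0xFF && !*src)
            src++;            /* skip the stuffed zero byte */
    }
    *dst_size = dst - dst_start;
}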
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
                            int plane, int16_t *block)
{
    const int is_chroma = !!plane;

    dc = dc * qmat[0] + c->prev_dc[plane];
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
{
    out[ridx]     = av_clip_uint8(Y + ( 91881 * V              + 32768 >> 16));
    out[1]        = av_clip_uint8(Y + (-22554 * U - 46802 * V  + 32768 >> 16));
    out[2 - ridx] = av_clip_uint8(Y + (116130 * U              + 32768 >> 16));
}
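/*
 * The constants above are the usual JFIF/BT.601 YCbCr->RGB factors in 16.16
 * fixed point with +32768 for rounding: 91881/65536 ~ 1.402, 22554/65536 ~
 * 0.344, 46802/65536 ~ 0.714, 116130/65536 ~ 1.772.  A floating-point
 * reference (hypothetical helper, illustration only) that the fixed-point
 * code should match to within rounding:
 */
static void yuv2rgb_float_ref(uint8_t *out, int ridx, int Y, int U, int V)
{
    out[ridx]     = av_clip_uint8((int)(Y + 1.402f * V + 0.5f));
    out[1]        = av_clip_uint8((int)(Y - 0.344f * U - 0.714f * V + 0.5f));
    out[2 - ridx] = av_clip_uint8((int)(Y + 1.772f * U + 0.5f));
}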
    int mb_w, mb_h, mb_x, mb_y, i, j;
    const int ridx = swapuv ? 2 : 0;

    mb_h = (height + 15) >> 4;

    num_mbs = mb_w * mb_h * 4;

    for (i = 0; i < 3; i++)

    for (mb_y = 0; mb_y < mb_h; mb_y++) {
        for (mb_x = 0; mb_x < mb_w; mb_x++) {
            if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
                !mask[mb_x * 2 +     mask_stride] &&
                !mask[mb_x * 2 + 1 + mask_stride]) {

            for (j = 0; j < 2; j++) {
                for (i = 0; i < 2; i++) {
                    if (mask && !mask[mb_x * 2 + i + j * mask_stride])

                                                c->block[i + j * 2])) != 0)

            for (i = 1; i < 3; i++) {

            for (j = 0; j < 16; j++) {
                uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
                for (i = 0; i < 16; i++) {

                    Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
                    U = c->block[4][(i >> 1) + (j >> 1) * 8] - 128;
                    V = c->block[5][(i >> 1) + (j >> 1) * 8] - 128;
                    yuv2rgb(out + i * 3, ridx, Y, U, V);

        mask += mask_stride * 2;
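/*
 * The indexing above is consistent with a 4:2:0 macroblock: a 16x16 luma
 * area stored as four 8x8 blocks (c->block[0..3]) plus one 8x8 U and one
 * 8x8 V block (c->block[4] and c->block[5]) at half resolution.  A
 * hypothetical helper spelling out the luma lookup for pixel (i, j) of the
 * macroblock:
 */
static inline int mb_luma_sample_sketch(int16_t block[6][64], int i, int j)
{
    return block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
}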
#define LOAD_NEIGHBOURS(x)      \
    W   = curr_row[(x)   - 1];  \
    N   = above_row[(x)];       \
    WW  = curr_row[(x)   - 2];  \
    NW  = above_row[(x)  - 1];  \
    NE  = above_row[(x)  + 1];  \
    NN  = above2_row[(x)];      \
    NNW = above2_row[(x) - 1];  \
    NWW = above_row[(x)  - 2];  \
    NNE = above2_row[(x) + 1]

#define UPDATE_NEIGHBOURS(x)    \
    NNW = NN;                   \
    NN  = NNE;                  \
    NWW = NW;                   \
    NW  = N;                    \
    N   = NE;                   \
    NE  = above_row[(x)  + 1];  \
    NNE = above2_row[(x) + 1]

static int djb2_hash(uint32_t key)
{
    uint32_t h = 5381;

    h = (h * 33) ^ ((key >> 24) & 0xFF);
    h = (h * 33) ^ ((key >> 16) & 0xFF);
    h = (h * 33) ^ ((key >>  8) & 0xFF);
    h = (h * 33) ^  (key        & 0xFF);

    return h & (EPIC_HASH_SIZE - 1);
}
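/*
 * Sketch only: the unrolled statements above are the DJB2 "h * 33 ^ byte"
 * variant applied to the four bytes of a packed pixel.  An equivalent loop
 * form (hypothetical helper, not part of the decoder):
 */
static uint32_t djb2_hash_loop_sketch(uint32_t key)
{
    uint32_t h = 5381;
    int shift;

    for (shift = 24; shift >= 0; shift -= 8)
        h = (h * 33) ^ ((key >> shift) & 0xFF);
    return h & (EPIC_HASH_SIZE - 1);
}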
    memset(hash, 0, sizeof(*hash));

    if (bucket[i].pix_id == key)

    memset(ret, 0, sizeof(*ret));

    new_elem->pixel = pix;
    hash_elem->list = new_elem;

    if (dc->stack[i] == pix)
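/*
 * Shape of the pixel cache implied by the fragments above (member names
 * appear elsewhere in the file; the summary itself is an interpretation,
 * not a comment from the source): djb2_hash(pix) selects one of
 * EPIC_HASH_SIZE buckets in an ePICPixHash; each bucket holds
 * ePICPixHashElem entries keyed by pix_id, and every entry keeps a singly
 * linked ePICPixListElem list of pixels recently decoded in that context,
 * which epic_decode_from_cache() walks.
 */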
#define TOSIGNED(val) (((val) >> 1) ^ -((val) & 1))

static int epic_decode_component_pred(ePICContext *dc,
                                      int N, int W, int NW)
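/*
 * TOSIGNED() undoes the usual unsigned zig-zag mapping of signed values:
 * 0, 1, 2, 3, 4, ... -> 0, -1, 1, -2, 2, ...  A quick illustrative
 * self-check (assumes libavutil/avassert.h for av_assert0):
 */
static void tosigned_selfcheck(void)
{
    static const int expect[5] = { 0, -1, 1, -2, 2 };
    int v;

    for (v = 0; v < 5; v++)
        av_assert0(TOSIGNED(v) == expect[v]);
}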
static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y,
                                       const uint32_t *curr_row,
                                       const uint32_t *above_row)
{
    int GN, GW, GNW, R, G, B;

    NW = above_row[x - 1];

                                      ((NW >> R_shift) & 0xFF) - GNW);

                                      ((NW >> B_shift) & 0xFF) - GNW);

    pred = curr_row[x - 1];

    if (R < 0 || G < 0 || B < 0 || R > 255 || G > 255 || B > 255) {
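/*
 * Interpretation of the fragments above (hedged, not an original comment):
 * the green component is predicted first, and R/B are carried over via the
 * neighbours' R-G and B-G differences, with the final triplet rejected if
 * any component falls outside 0..255.  A simplified illustration with a
 * hypothetical helper name:
 */
static void predict_rb_from_green_sketch(int G, int GW, int RW, int BW,
                                         int *R, int *B)
{
    *R = G + (RW - GW);   /* keep the neighbour's R-G difference */
    *B = G + (BW - GW);   /* keep the neighbour's B-G difference */
}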
static int epic_predict_pixel(ePICContext *dc, uint8_t *rung,
                              uint32_t *pPix, uint32_t pix)

static int epic_handle_edges(ePICContext *dc, int x, int y,
                             const uint32_t *curr_row,
                             const uint32_t *above_row, uint32_t *pPix)

    pix = curr_row[x - 1];
static int epic_decode_run_length(ePICContext *dc, int x, int y,
                                  int tile_width,
                                  const uint32_t *curr_row,
                                  const uint32_t *above_row,
                                  const uint32_t *above2_row,
                                  uint32_t *pPix, int *pRun)
{
    int idx, got_pixel = 0, WWneW, old_WWneW = 0;
    uint32_t W, WW, N, NN, NW, NE, NWW, NNW, NNE;

    idx = (WW != W) << 7 |

    NWneW = *pRun ? NWneW : NW != W;

    switch (((NW != N) << 2) | (NWneW << 1) | WWneW) {

            (*pRun ? old_WWneW : WW != W) << 7 |

        if (x + *pRun >= tile_width - 1)

        if (!NWneW && NW == N && N == NE) {
            int start_pos = x + *pRun;

            uint32_t pix = above_row[start_pos + 1];
            for (pos = start_pos + 2; pos < tile_width; pos++)
                if (!(above_row[pos] == pix))
                    break;
            run = pos - start_pos - 1;
            idx = av_ceil_log2(run);

            for (pos = idx - 1, rle = 0, flag = 0; pos >= 0; pos--) {
                if ((1 << pos) + rle < run &&

        if (x + *pRun >= tile_width - 1)
static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung,
                               uint32_t *pPix, uint32_t pix)

static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run,
                                   int tile_width, const uint32_t *curr_row,
                                   const uint32_t *above_row, uint32_t *pPix)
{
    uint32_t NW = above_row[x - 1];

    if (pos < tile_width - 1 && y) {
        uint32_t NE = above_row[pos + 1];

static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)

    if (!hash_elem || !hash_elem->list)

    list = hash_elem->list;

    if (list != hash_elem->list) {
static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height,
                            int tile_width, int stride)
{
    uint32_t *curr_row = NULL, *above_row = NULL, *above2_row;

    for (y = 0; y < tile_height; y++, out += stride) {
        above2_row = above_row;
        above_row  = curr_row;
        curr_row   = (uint32_t *) out;

            pix = curr_row[x - 1];

            if (y >= 1 && x >= 2 &&
                pix != curr_row[x - 2] && pix != above_row[x - 1] &&
                pix != above_row[x - 2] && pix != above_row[x] &&

            if (y < 2 || x < 2 || x == tile_width - 1) {

                                             above2_row, &pix, &run);

                                             tile_width, curr_row,

                uint32_t ref_pix = curr_row[x - 1];

            for (; run > 0; x++, run--)
static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y,
                               const uint8_t *src, size_t src_size,
                               AVCodecContext *avctx)
{
    int extrabytes, tile_width, tile_height, awidth, aheight;

    for (extrabytes = 0; (prefix & mask) && (extrabytes < 7); extrabytes++)

    if (extrabytes > 3 || src_size < extrabytes) {

    els_dsize = prefix & ((0x80 >> extrabytes) - 1);
    while (extrabytes-- > 0) {
        els_dsize = (els_dsize << 8) | *src++;

    if (src_size < els_dsize) {
               els_dsize, src_size);

    awidth  = FFALIGN(tile_width,  16);
    aheight = FFALIGN(tile_height, 16);

        uint8_t tr_r, tr_g, tr_b, *buf;

        memset(&c->ec, 0, sizeof(c->ec));

               "ePIC: couldn't decode transparency pixel!\n");

               "ePIC: tile decoding failed, frame=%d, tile_x=%d, tile_y=%d\n",

        for (j = 0; j < tile_height; j++) {
            in = (uint32_t *) buf;
            for (i = 0; i < tile_width; i++) {

    if (src_size > els_dsize) {
        int bstride = FFALIGN(tile_width, 16) >> 3;

        src_size -= els_dsize;

               (aheight >> 3) * bstride * sizeof(*c->kempf_flags));
        for (j = 0; j < tile_height; j += 8) {
            for (i = 0; i < tile_width; i += 8) {
                for (k = 0; k < 8 * 8; k++) {
                    if (in[i + (k & 7) + (k >> 3) * estride] == tr) {

        for (j = 0; j < tile_height; j++) {
            for (i = 0; i < tile_width; i++)
                memcpy(dst + i * 3, jpg + i * 3, 3);
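/*
 * Reading of the control flow above (an interpretation, not a comment from
 * the source): the tile starts with an ELS-coded layer of size els_dsize;
 * if more data follows, it is JPEG-coded.  8x8 blocks that contain the
 * transparency colour tr are flagged in c->kempf_flags, the JPEG layer is
 * decoded with those flags as a block mask, and the flagged pixels are then
 * filled from the JPEG plane with 3-byte RGB copies.
 */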
static int kempf_restore_buf(const uint8_t *src, int len,
                             uint8_t *dst, int stride,
                             const uint8_t *jpeg_tile, int tile_stride,
                             int width, int height,
                             const uint8_t *pal, int npal, int tidx)
{
    int align_width = FFALIGN(width, 16);

    if      (npal <= 2)  nb = 1;
    else if (npal <= 4)  nb = 2;
    else if (npal <= 16) nb = 4;

    for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {

        for (i = 0; i < width; i++) {

            memcpy(dst + i * 3, pal + col * 3, 3);

            memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
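/*
 * Sketch of the per-pixel index unpacking implied above: npal palette
 * entries are coded with nb = 1, 2 or 4 bits per pixel, and the index tidx
 * (the transparency entry) selects the co-located JPEG pixel instead of a
 * palette colour.  Assumptions: MSB-first get_bits() reads and no row
 * padding; the helper name is hypothetical.
 */
static void kempf_restore_row_sketch(GetBitContext *gb, uint8_t *dst,
                                     const uint8_t *jpeg_row,
                                     const uint8_t *pal,
                                     int width, int nb, int tidx)
{
    int i;

    for (i = 0; i < width; i++) {
        int col = get_bits(gb, nb);
        if (col != tidx)
            memcpy(dst + i * 3, pal + col * 3, 3);       /* palette colour */
        else
            memcpy(dst + i * 3, jpeg_row + i * 3, 3);    /* JPEG background */
    }
}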
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int hdr, zsize, npal, tidx = -1, ret;
    const uint8_t *src_end = src + src_size;

    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;

    sub_type = hdr >> 5;
    if (sub_type == 0) {
        memcpy(transp, src, 3);

        for (i = 0; i < width; i++)
            memcpy(dst + i * 3, transp, 3);

    } else if (sub_type == 1) {

    if (sub_type != 2) {
        memcpy(transp, src, 3);

    if (src_end - src < npal * 3)

    memcpy(pal, src, npal * 3);

    if (sub_type != 2) {
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {

    if (src_end - src < 2)

    zsize = (src[0] << 8) | src[1];

    if (src_end - src < zsize + (sub_type != 2))

    if (sub_type == 2) {
                                 NULL, 0, width, height, pal, npal, tidx);

    nblocks = *src++ + 1;

    bstride = FFALIGN(width, 16) >> 3;

    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {

            if (cblocks > nblocks)

            c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;

                             width, height, pal, npal, tidx);
    aligned_height = c->height + 15;
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
                           GetByteContext *gb)
{
    uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
    uint32_t cursor_hot_x, cursor_hot_y;
    int cursor_fmt, err;

    cur_size     = bytestream2_get_be32(gb);
    cursor_w     = bytestream2_get_byte(gb);
    cursor_h     = bytestream2_get_byte(gb);
    cursor_hot_x = bytestream2_get_byte(gb);
    cursor_hot_y = bytestream2_get_byte(gb);
    cursor_fmt   = bytestream2_get_byte(gb);

    cursor_stride = FFALIGN(cursor_w, cursor_fmt == 1 ? 32 : 1) * 4;

    if (cursor_w < 1 || cursor_w > 256 ||
        cursor_h < 1 || cursor_h > 256) {
               cursor_w, cursor_h);

    if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
               cursor_hot_x, cursor_hot_y);
        cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
        cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);

    if (cursor_fmt != 1 && cursor_fmt != 32) {

        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    dst[0] = !!(bits & 0x80000000);

        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    int mask_bit = !!(bits & 0x80000000);
                    switch (dst[0] * 2 + mask_bit) {

        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i++) {
                int val = bytestream2_get_be32(gb);
#define APPLY_ALPHA(src, new, alpha) \
    src = (src * (256 - alpha) + new * alpha) >> 8

    if (x + w > c->width)

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {

            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
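/*
 * APPLY_ALPHA() is a fixed-point blend with alpha scaled so that 256 means
 * fully opaque: dst' = (dst * (256 - alpha) + new * alpha) >> 8.  For
 * example, dst = 200, new = 100, alpha = 128 gives
 * (200*128 + 100*128) >> 8 = 150.  An equivalent function form
 * (illustration only, hypothetical name):
 */
static inline uint8_t blend_alpha_sketch(uint8_t dst, uint8_t src, int alpha)
{
    return (dst * (256 - alpha) + src * alpha) >> 8;
}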
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_picture_ptr, AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    uint32_t chunk_size, r_mask, g_mask, b_mask;

    if (buf_size < 12) {
               "Frame should have at least 12 bytes, got %d instead\n",

    magic = bytestream2_get_be32(&bc);
    if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
        (magic & 0xF) < 2 || (magic & 0xF) > 5) {
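/*
 * Worked example (computed here, not taken from the original source):
 * MKBETAG packs four characters big-endian, so MKBETAG('G','2','M','0') is
 * 0x47324D30.  The digits '0'..'9' share the high nibble 0x3, so masking
 * the magic with ~0xF keeps the "G2M" signature plus that nibble, while
 * magic & 0xF recovers the version digit; versions 2..5 ("G2M2".."G2M5")
 * are accepted above.
 */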
    chunk_size = bytestream2_get_le32(&bc) - 1;
    chunk_type = bytestream2_get_byte(&bc);
           chunk_size, chunk_type);
    switch (chunk_type) {

        if (chunk_size < 21) {

        c->width  = bytestream2_get_be32(&bc);
        c->height = bytestream2_get_be32(&bc);
               "Invalid frame dimensions %dx%d\n",

               "Invalid tile dimensions %dx%d\n",

        c->bpp = bytestream2_get_byte(&bc);

            (chunk_size - 21) < 16) {
                   "Display info: missing bitmasks!\n");

        r_mask = bytestream2_get_be32(&bc);
        g_mask = bytestream2_get_be32(&bc);
        b_mask = bytestream2_get_be32(&bc);
        if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
                   "Bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32,
                   r_mask, g_mask, b_mask);

               "No display info - skipping tile\n");

        if (chunk_size < 2) {

        c->tile_x = bytestream2_get_byte(&bc);
        c->tile_y = bytestream2_get_byte(&bc);
               "Invalid tile pos %d,%d (in %dx%d grid)\n",

               chunk_size - 2, avctx);

        if (chunk_size < 5) {

        c->cursor_x = bytestream2_get_be16(&bc);
        c->cursor_y = bytestream2_get_be16(&bc);

        if (chunk_size < 8) {

    for (i = 0; i < avctx->height; i++)

    *got_picture_ptr = 1;