33 #define HEADER_SIZE 27
35 #define MODEL2_SCALE 13
36 #define MODEL_SCALE 15
37 #define MODEL256_SEC_SCALE 9
45 typedef struct Model {
60 #define RAC_BOTTOM 0x01000000
182 m->freqs[i] = sum * scale >> 16;
197 for (i = 0; i < m->num_syms - 1; i++)
230 for (i = 0; i < 256; i++) {
237 for (i = 0; i < 256; i++) {
238 m->freqs[i] = sum * scale >> 16;
244 while (sidx < m->sec_size)
257 for (i = 0; i < 255; i++)
284 for (i = 0; i < FFMIN(size, 4); i++)
286 c->range = 0xFFFFFFFF;
297 } else if (!c->low) {
341 bit = (c->low >= helper);
361 unsigned prob, prob2, helper;
371 if (helper <= c->low) {
378 end = (end2 + val) >> 1;
379 } while (end != val);
381 c->range = prob2 - prob;
392 int prob, prob2, helper, val;
403 end = start = m->secondary[ssym + 1] + 1;
404 while (end > val + 1) {
405 ssym = (end + val) >> 1;
406 if (m->freqs[ssym] <= helper) {
410 end = (end + val) >> 1;
419 c->range = prob2 - prob;
460 for (i = 0; i < block_size; i++, dst += stride)
461 memset(dst, fc->fill_val, block_size);
474 for (i = 0; i < vec_size; i++)
478 memset(prev_line, 0, sizeof(prev_line));
480 for (j = 0; j < block_size; j++) {
483 for (i = 0; i < block_size; i++) {
501 int skip, val, sign, pos = 1, zz_pos, dc;
504 memset(block, 0, sizeof(*block) * 64);
526 block[0] = dc * bc->qmat[0];
553 block[zz_pos] = val * bc->qmat[zz_pos];
557 return pos == 64 ? 0 : -1;
562 int *block, int mb_x, int mb_y)
566 int nblocks = block_size >> 3;
571 for (j = 0; j < nblocks; j++) {
572 for (i = 0; i < nblocks; i++) {
573 if (decode_dct(c, bc, block, bx + i, by + j)) {
585 int block_size, int *block)
587 const int hsize = block_size >> 1;
591 for (j = 0; j < block_size; j++) {
592 for (i = 0; i < block_size; i++) {
593 if (i < hsize && j < hsize)
597 block[i] *= hc->scale;
601 block -= block_size * block_size;
603 for (j = 0; j < hsize; j++) {
604 for (i = 0; i < hsize; i++) {
606 B = block[i + hsize];
607 C = block[i + hsize * block_size];
608 D = block[i + hsize * block_size + hsize];
614 dst[i * 2] = av_clip_uint8(t1 - t2);
615 dst[i * 2 + stride] = av_clip_uint8(t1 + t2);
616 dst[i * 2 + 1] = av_clip_uint8(t3 - t4);
617 dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
628 for (i = 0; i < 3; i++) {
630 for (j = 0; j < 5; j++)
637 for (j = 0; j < 125; j++)
663 for (i = 0; i < 3; i++) {
664 for (j = 0; j < 5; j++)
670 for (j = 0; j < 125; j++)
683 int buf_size = avpkt->size;
688 int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
689 int x, y, i, mb_width, mb_height, blk_size, btype;
694 "Frame should have at least %d bytes, got %d instead\n",
700 keyframe = bytestream2_get_be32(&gb);
701 if (keyframe & ~0x301) {
705 keyframe = !(keyframe & 1);
707 dec_x = bytestream2_get_be16(&gb);
708 dec_y = bytestream2_get_be16(&gb);
709 dec_width = bytestream2_get_be16(&gb);
710 dec_height = bytestream2_get_be16(&gb);
712 if (dec_x + dec_width > avctx->width ||
713 dec_y + dec_height > avctx->height ||
714 (dec_width | dec_height) & 0xF) {
716 dec_width, dec_height, dec_x, dec_y);
720 quality = bytestream2_get_byte(&gb);
721 if (quality < 1 || quality > 100) {
751 mb_width = dec_width >> 4;
752 mb_height = dec_height >> 4;
756 for (y = 0; y < mb_height; y++) {
757 for (x = 0; x < mb_width; x++) {
758 for (i = 0; i < 3; i++) {
765 dst[i] + x * blk_size,
770 dst[i] + x * blk_size,
775 dst[i] + x * blk_size,
781 dst[i] + x * blk_size,
813 for (i = 0; i < 3; i++)
826 if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
828 "Image dimensions should be a multiple of 16.\n");
833 for (i = 0; i < 3; i++) {
834 int b_width = avctx->width >> (2 + !!i);
835 int b_height = avctx->height >> (2 + !!i);
void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma)
Generate quantisation matrix for given quality.
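A minimal usage sketch follows; the PlaneDCTCoder struct and update_quant_matrices() helper are illustrative assumptions, not code from this file. It shows a decoder regenerating one quantisation matrix per plane when the stream signals a new quality value, using the luma matrix for plane 0 and the chroma matrix for planes 1 and 2.

#include <stdint.h>
#include "mss34dsp.h"              /* declares ff_mss34_gen_quant_mat() */

typedef struct PlaneDCTCoder {     /* hypothetical per-plane state, not from this file */
    uint16_t qmat[64];             /* quantisation matrix for an 8x8 DCT block */
} PlaneDCTCoder;

static void update_quant_matrices(PlaneDCTCoder coder[3], int quality)
{
    for (int i = 0; i < 3; i++)
        /* plane 0 gets the luma matrix, planes 1 and 2 the chroma one */
        ff_mss34_gen_quant_mat(coder[i].qmat, quality, !i);
}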
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic, uint8_t *dst, ptrdiff_t stride, int block_size)
static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
AVFrame
This structure describes decoded (raw) audio or video data.
#define MODEL256_SEC_SCALE
static void model2_reset(Model2 *m)
static av_cold int init(AVCodecContext *avctx)
ImageBlockCoder image_coder[3]
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int16_t weights[MODEL_MAX_SYMS+1]
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block)
static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold int end(AVCodecContext *avctx)
static void rac_normalise(RangeCoder *c)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block, int bx, int by)
static void reset_coders(MSS3Context *ctx, int quality)
static void rac_init(RangeCoder *c, const uint8_t *src, int size)
static void model256_update(Model256 *m, int val)
static av_cold void model_init(Model *m, int num_syms)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
DCTBlockCoder dct_coder[3]
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
const char * name
Name of the codec implementation.
static av_cold void model256_init(Model256 *m)
HaarBlockCoder haar_coder[3]
void ff_mss34_dct_put(uint8_t *dst, ptrdiff_t stride, int *block)
Transform and output DCT block.
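A brief sketch under assumptions (put_dct_block() and its arguments are hypothetical; only the ff_mss34_dct_put() signature above is given): once an 8x8 block has been decoded and dequantised, the helper can write the inverse-transformed, clipped pixels straight into the output plane.

#include <stdint.h>
#include <stddef.h>
#include "mss34dsp.h"              /* declares ff_mss34_dct_put() */

/* Hypothetical wrapper: inverse-transform one dequantised 8x8 coefficient
 * block and store the result at (x, y) of the destination plane. */
static void put_dct_block(uint8_t *plane, ptrdiff_t stride,
                          int x, int y, int *block)
{
    ff_mss34_dct_put(plane + y * stride + x, stride, block);
}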
static const uint16_t fc[]
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
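For orientation, a generic sketch of how this call and av_frame_ref() above typically combine inside a decoder's frame callback; MyContext, its pic field, and the error handling shown are assumptions, not taken from this listing.

/* Sketch only: assumes the usual libavcodec headers for this FFmpeg version. */
#include "avcodec.h"
#include "internal.h"                      /* ff_reget_buffer() */

typedef struct MyContext {                 /* hypothetical private context */
    AVFrame *pic;                          /* frame kept between calls */
} MyContext;

static int decode_frame_sketch(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    MyContext *ctx = avctx->priv_data;
    int ret;

    /* Reuse the previously decoded frame's buffer when possible. */
    if ((ret = ff_reget_buffer(avctx, ctx->pic)) < 0)
        return ret;

    /* ... decode avpkt->data into ctx->pic here ... */

    /* Give the caller its own reference to the decoded picture. */
    if ((ret = av_frame_ref(data, ctx->pic)) < 0)
        return ret;
    *got_frame = 1;

    return avpkt->size;
}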
enum AVPictureType pict_type
Picture type of the frame.
static av_cold int mss3_decode_init(AVCodecContext *avctx)
int width
picture width / height.
static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block, int mb_x, int mb_y)
static int rac_get_bits(RangeCoder *c, int nbits)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static void model_reset(Model *m)
static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
Libavcodec external API header.
static int rac_get_model_sym(RangeCoder *c, Model *m)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVCodecContext
main external API structure.
static void model_update(Model *m, int val)
static av_cold void init_coders(MSS3Context *ctx)
static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc, uint8_t *dst, ptrdiff_t stride, int block_size)
static int rac_get_bit(RangeCoder *c)
static void model256_reset(Model256 *m)
const uint8_t ff_zigzag_direct[64]
FillBlockCoder fill_coder[3]
static av_cold int mss3_decode_end(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void model2_update(Model2 *m, int bit)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
static int decode_coeff(RangeCoder *c, Model *m)
int key_frame
1 -> keyframe, 0-> not
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
BlockTypeContext btype[3]
static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
AVPacket
This structure stores compressed data.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.