#if AV_GCC_VERSION_AT_LEAST(10, 0) && AV_GCC_VERSION_AT_MOST(12, 0) \
    && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC optimize ("no-ipa-cp-clone")

                         s->frame_width, s->frame_height);

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6
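/* Editorial note (an interpretation, not text from the file): encode_block()
 * below compares its rate-distortion score against `threshold` when deciding
 * whether to split a block into smaller sub-blocks, and the recursive call
 * further down passes `threshold >> 1`, so the acceptance threshold is halved
 * at each split level. */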
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, unsigned level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int w = 2 << (level + 2 >> 1);
    int h = 2 << (level + 1 >> 1);
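/* Minimal sketch (not part of the original file; the function name is
 * hypothetical) of the block sizes the two shift expressions above yield for
 * the six SVQ1 levels used by the encoder. */
static void svq1_level_geometry_sketch(void)
{
    for (unsigned level = 0; level < 6; level++) {
        int w = 2 << (level + 2 >> 1); /* 4, 4, 8, 8, 16, 16 */
        int h = 2 << (level + 1 >> 1); /* 2, 4, 4, 8,  8, 16 */
        /* level 5 is a full 16x16 macroblock; each lower level halves one
         * dimension, down to 4x2 at level 0. */
        (void)w;
        (void)h;
    }
}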
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (count = 1; count < 7; count++) {
        int best_vector_score = INT_MAX;
        int best_vector_sum = -999, best_vector_mean = -999;
        const int stage = count - 1;
        const int8_t *vector;

        for (i = 0; i < 16; i++) {
            int sum = codebook_sum[stage * 16 + i];

            sqr = s->svq1encdsp.ssd_int8_vs_int16(vector, block[stage], size);
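/* Hedged sketch of what the ssd_int8_vs_int16() DSP hook is expected to
 * compute, modeled on the plain C fallback (real builds may substitute a SIMD
 * version): the sum of squared differences between an int8 codebook vector and
 * the int16 residual block. The function name is illustrative only and relies
 * on the stdint types already included by the file. */
static int ssd_int8_vs_int16_sketch(const int8_t *pix1, const int16_t *pix2,
                                    intptr_t size)
{
    int score = 0;

    for (intptr_t i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);

    return score;
}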
            if (score < best_vector_score) {
                best_vector_score  = score;
                best_vector[stage] = i;
                best_vector_sum    = sum;
                best_vector_mean   = mean;

        for (j = 0; j < size; j++)
            block[stage + 1][j] = block[stage][j] - vector[j];

        best_vector_score += lambda *
                             multistage_vlc[1 + count][1]
                             + mean_vlc[best_vector_mean][1]);
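/* Editorial note (an interpretation, not text from the file): the score kept
 * per stage count is a Lagrangian cost, distortion + lambda * bits, where the
 * bit estimate includes the VLC lengths stored in column [1] of the multistage
 * and mean tables (column [0] holds the code value, as the put_bits()
 * fragments further below suggest) on top of the codebook indices chosen from
 * 16 entries per stage. */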
        if (best_vector_score < best_score) {
            best_score = best_vector_score;
            best_mean  = best_vector_mean;
    if (best_mean == -128)
        best_mean = -127;
    else if (best_mean == 128)
        best_mean = 127;
    if (best_score > threshold && level) {

            backup[i] = s->reorder_pb[i];

                             threshold >> 1, lambda, intra);

        if (score < best_score) {

                s->reorder_pb[i] = backup[i];

    av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
    av_assert1(best_mean >= -256 && best_mean < 256);
    av_assert1(best_count >= 0 && best_count < 7);

                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                                          block[best_count][x + w * y] +

    s->block_index[0] = s->b8_stride * (s->mb_y * 2)     + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2)     + 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) + 1 + s->mb_x * 2;
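/* Illustrative sketch (hypothetical helper, not in the file): the four indices
 * computed above address the 2x2 grid of 8x8 blocks belonging to macroblock
 * (mb_x, mb_y) in an array whose row stride is b8_stride:
 *
 *     block_index[0]  block_index[1]   <- row 2*mb_y
 *     block_index[2]  block_index[3]   <- row 2*mb_y + 1
 */
static void block_index_sketch(int b8_stride, int mb_x, int mb_y, int idx[4])
{
    idx[0] = b8_stride * (mb_y * 2)     + mb_x * 2; /* top-left     */
    idx[1] = idx[0] + 1;                            /* top-right    */
    idx[2] = b8_stride * (mb_y * 2 + 1) + mb_x * 2; /* bottom-left  */
    idx[3] = idx[2] + 1;                            /* bottom-right */
}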
static int svq1_encode_plane(SVQ1EncContext *s, int plane, PutBitContext *pb,
                             const unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int block_width, block_height;
    const int lambda = (s->quality * s->quality) >>

    block_width  = (width + 15) / 16;
    block_height = (height + 15) / 16;
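/* Worked example of the round-up above (illustrative numbers only): a 176x144
 * (QCIF) luma plane gives block_width = (176 + 15) / 16 = 11 and
 * block_height = (144 + 15) / 16 = 9, i.e. an 11x9 grid of 16x16 macroblocks,
 * with the edge-padding loops further down filling the part of the last
 * row/column that lies outside the picture. */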
    s->m.new_pic->linesize[0] =

    s->m.me.scene_change_score = 0;

        s->m.lambda = s->quality;
        s2->qscale  = s->m.lambda * 139 +

        s->m.lambda2 = s->m.lambda * s->m.lambda +

        s->m.mb_type = s->mb_type;

        s->m.mb_mean   = (uint8_t *)s->dummy;
        s->m.mb_var    = (uint16_t *)s->dummy;
        s->m.mc_mb_var = (uint16_t *)s->dummy;

        s->m.p_mv_table = s->motion_val16[plane] +

        s->m.me.dia_size = s->avctx->dia_size;
        for (y = 0; y < block_height; y++) {
            s->m.new_pic->data[0] = src - y * 16 * stride;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],

                for (x = width; x < 16 * block_width; x++)

            for (; i < 16 && i + 16 * y < 16 * block_height; i++)

            for (x = 0; x < block_width; x++) {
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],

            for (x = width; x < 16 * block_width; x++)

        for (; i < 16 && i + 16 * y < 16 * block_height; i++)

        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];

            uint8_t *decoded = decoded_plane + offset;

            int score[4] = { 0, 0, 0, 0 }, best;
            uint8_t *temp = s->scratchbuf;
            for (i = 0; i < 6; i++)

            for (i = 0; i < 6; i++) {

                int mx, my, pred_x, pred_y, dxy;

                for (i = 0; i < 6; i++)

                dxy = (mx & 1) + 2 * (my & 1);
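/* Hedged note: mx & 1 and my & 1 are the half-pel fraction bits of the motion
 * vector, so dxy selects one of hpeldsp's four put_pixels variants:
 * 0 = integer-pel copy, 1 = horizontal half-pel, 2 = vertical half-pel,
 * 3 = both (diagonal half-pel). */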
                                      decoded, stride, 5, 64, lambda, 0);
            best = score[1] <= score[0];

            if (score[2] < score[best] && mx == 0 && my == 0) {

            for (i = 0; i < 6; i++) {

            s->rd_total += score[best];

            for (i = 5; i >= 0; i--)

    for (i = 0; i < 3; i++) {

    int size = strlen(ident);

    if (avctx->width >= 4096 || avctx->height >= 4096) {

    if (!s->current_picture || !s->last_picture) {
    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->m.c.avctx = avctx;

    for (size_t plane = 0; plane < FF_ARRAY_ELEMS(s->motion_val16); ++plane) {
        const int shift = plane ? 2 : 0;
        unsigned block_height = ((s->frame_height >> shift) + 15U) / 16;
        unsigned block_width  = ((s->frame_width  >> shift) + 15U) / 16;
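/* The shift of 2 for the chroma planes matches AV_PIX_FMT_YUV410P (one Cb/Cr
 * sample per 4x4 luma samples), so each chroma plane is a quarter of the luma
 * plane in both dimensions before the same round-up to whole 16x16 blocks. */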
        s->motion_val8[plane]  = av_calloc((2 * block_width + 1) * block_height * 2 + 2,
                                           2 * sizeof(int16_t));
        s->motion_val16[plane] = av_calloc((block_width + 1) * (block_height + 2) + 1,
                                           2 * sizeof(int16_t));
        if (!s->motion_val8[plane] || !s->motion_val16[plane])

                                      2 * 16 * 2 * sizeof(uint8_t));

                              s->y_block_height * sizeof(int16_t));

                               s->y_block_height * sizeof(int32_t));

    if (!s->m.me.scratchpad ||
        !s->mb_type || !s->dummy || !s->m.new_pic)
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
    for (i = 0; i < 3; i++) {

                          s->last_picture->data[i],
                          s->current_picture->data[i],
                          s->frame_width  / (i ? 4 : 1),
                          s->frame_height / (i ? 4 : 1),

                          s->current_picture->linesize[i]);

#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

    CODEC_LONG_NAME("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),