Annotated source excerpts from FFmpeg's SVQ1 encoder (libavcodec/svq1enc.c), the Sorenson Vector Quantizer 1 / Sorenson Video 1 encoder.
#if AV_GCC_VERSION_AT_LEAST(10, 0) && AV_GCC_VERSION_AT_MOST(12, 0) \
    && !defined(__clang__) && !defined(__INTEL_COMPILER)
/* disable IPA constant-propagation cloning on the affected GCC versions */
#pragma GCC optimize ("no-ipa-cp-clone")
#endif
/* fragment from svq1_write_header(): the frame dimensions */
                         s->frame_width,
                         s->frame_height);
#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, unsigned level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int w = 2 << ((level + 2) >> 1);
    int h = 2 << ((level + 1) >> 1);
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t  (*multistage_vlc)[2];
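For reference, the two shift expressions above generate the SVQ1 block hierarchy used by encode_block(). A standalone check (plain C, independent of the FFmpeg tree) of the sizes they produce for each level:

#include <stdio.h>

int main(void)
{
    for (unsigned level = 0; level < 6; level++) {
        int w = 2 << ((level + 2) >> 1);
        int h = 2 << ((level + 1) >> 1);
        printf("level %u: %2dx%-2d (%3d samples)\n", level, w, h, w * h);
    }
    return 0;
}

Level 5 is the full 16x16 macroblock and each step down halves one dimension, ending at 4x2; the largest block holds 256 samples, which matches the int16_t (*block)[256] rows above.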
    /* ... */
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            /* ... */
        }
    }
    /* ... */
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            /* ... */
        }
    }
    /* add codebook stages one at a time (up to six) and keep the best prefix */
    for (count = 1; count < 7; count++) {
        int best_vector_score = INT_MAX;
        int best_vector_sum   = -999, best_vector_mean = -999;
        const int stage       = count - 1;
        const int8_t *vector;

        /* pick the best of the 16 candidate vectors in this stage */
        for (i = 0; i < 16; i++) {
            int sum = codebook_sum[stage * 16 + i];
            /* ... */
            sqr = s->svq1encdsp.ssd_int8_vs_int16(vector, block[stage], size);
            /* ... */
            if (score < best_vector_score) {
                /* ... */
                best_vector_score  = score;
                best_vector[stage] = i;
                best_vector_sum    = sum;
                best_vector_mean   = mean;
            }
        }
        /* ... */
        for (j = 0; j < size; j++)
            block[stage + 1][j] = block[stage][j] - vector[j];
        best_vector_score += lambda *
                             (/* ... */
                              multistage_vlc[1 + count][1]
                              + mean_vlc[best_vector_mean][1]);
        if (best_vector_score < best_score) {
            best_score = best_vector_score;
            /* ... */
            best_mean  = best_vector_mean;
        }
    }
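The inner loop above scores each of the 16 candidate vectors in the current codebook stage as distortion plus lambda times the bit cost of signalling the choice. A minimal plain-C sketch of that scoring, assuming the scalar behaviour of the ssd_int8_vs_int16() DSP callback (the helper name, test data and bit cost here are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Plain-C equivalent of the ssd_int8_vs_int16() callback used above: sum of
 * squared differences between an int8 codebook vector and the int16 residual. */
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               intptr_t size)
{
    int score = 0;
    for (intptr_t i = 0; i < size; i++) {
        int d = pix1[i] - pix2[i];
        score += d * d;
    }
    return score;
}

int main(void)
{
    const int8_t  vector[4]   = { 3, -2, 0, 1 };  /* made-up codebook vector  */
    const int16_t residual[4] = { 5,  1, 0, 2 };  /* made-up residual samples */
    int lambda = 100, bits = 7;                   /* assumed bit cost of the choice */

    /* same shape as the scoring above: distortion + lambda * bits */
    int score = ssd_int8_vs_int16_c(vector, residual, 4) + lambda * bits;
    printf("candidate score = %d\n", score);
    return 0;
}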
    /* special-case the extreme mean values */
    if (best_mean == -128)
        /* ... */
    else if (best_mean == 128)
        /* ... */
    /* if the whole block still scores above the threshold, try encoding it
     * split one level down (with the threshold halved) and keep the result
     * only if it is cheaper */
    if (best_score > threshold && level) {
        /* ... */
            backup[i] = s->reorder_pb[i];
        /* ... recursive encode_block() call ending with: */
                              threshold >> 1, lambda, intra);
        /* ... */
        if (score < best_score) {
            /* ... */
        }
        /* ... */
            s->reorder_pb[i] = backup[i];
        /* ... */
    }
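The branch above is the hierarchical split: when the whole block still costs more than the threshold and we are not at the lowest level, the reorder bit-writers are saved, the block is re-encoded one level down with the threshold halved, and the saved state is restored unless the split scored better. A toy sketch of that control flow, with a made-up cost model (whole_cost and half_cost are placeholders, not the encoder's scores):

#include <stdio.h>

/* Toy sketch (hypothetical cost model): if the whole block is still above the
 * threshold and we are not at the lowest level, try the two half-blocks one
 * level down with the threshold halved, and keep the split only if cheaper. */
static int encode_level(int level, int threshold, int whole_cost, int half_cost)
{
    int best_score = whole_cost;

    if (best_score > threshold && level) {
        int split_score = 0;
        for (int i = 0; i < 2; i++)                 /* the two half-blocks */
            split_score += encode_level(level - 1, threshold >> 1,
                                        half_cost, half_cost / 2);
        if (split_score < best_score)
            best_score = split_score;               /* keep the split */
        /* otherwise the saved reorder_pb[] state would be restored here */
    }
    return best_score;
}

int main(void)
{
    /* level-5 block, threshold 256: splitting pays off with these toy costs */
    printf("best score: %d\n", encode_level(5, 256, 1000, 300));
    return 0;
}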
    av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
    av_assert1(best_mean >= -256 && best_mean < 256);
    av_assert1(best_count >= 0 && best_count < 7);
    /* ... write the stage-count VLC ... */
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
    /* ... and the mean VLC ... */
                 mean_vlc[best_mean][0]);
    /* one codebook index is written per stage actually used */
    for (i = 0; i < best_count; i++) {
        /* ... */
    }
    /* reconstruct the decoded block from the remaining residual */
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            /* ... */
                block[best_count][x + w * y] + /* ... */;
static void init_block_index(MpegEncContext *const s)
{
    s->block_index[0] = s->b8_stride * (s->mb_y * 2)         + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2)     + 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1)     + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) + 1 + s->mb_x * 2;
}
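Since b8_stride is 2 * mb_width + 1 (see the b8_stride entry in the cross-reference list below), the four indices above address the 8x8 quadrants of macroblock (mb_x, mb_y) in raster order. A small worked example with an assumed 176x144 frame (11 macroblocks per row):

#include <stdio.h>

int main(void)
{
    /* Assumed geometry: 176x144 luma -> 11x9 macroblocks. */
    const int mb_width  = 11;
    const int b8_stride = 2 * mb_width + 1;       /* 23, cf. b8_stride below */
    const int mb_x = 3, mb_y = 2;

    int block_index[4];
    block_index[0] = b8_stride * (mb_y * 2)     +     mb_x * 2;  /* top-left     */
    block_index[1] = b8_stride * (mb_y * 2)     + 1 + mb_x * 2;  /* top-right    */
    block_index[2] = b8_stride * (mb_y * 2 + 1) +     mb_x * 2;  /* bottom-left  */
    block_index[3] = b8_stride * (mb_y * 2 + 1) + 1 + mb_x * 2;  /* bottom-right */

    for (int i = 0; i < 4; i++)
        printf("block_index[%d] = %d\n", i, block_index[i]);
    return 0;
}

With these assumed values the four indices come out as 98, 99, 121 and 122.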
static int svq1_encode_plane(SVQ1EncContext *s, int plane, PutBitContext *pb,
                             const unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    /* ... */
    int block_width, block_height;
    /* ... */
    const int lambda = (s->quality * s->quality) >>
                       /* ... */;
    /* ... */
    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;
    /* ... */
    s->m.new_pic->linesize[0] = /* ... */;
    /* ... */
    /* configure the shared mpegvideo motion-estimation state
     * (s2, declared earlier, is not part of this excerpt) */
    s->m.me.scene_change_score = 0;
    /* ... */
    s->m.lambda  = s->quality;
    s2->qscale   = s->m.lambda * 139 + /* ... */;
    /* ... */
    s->m.lambda2 = s->m.lambda * s->m.lambda + /* ... */;
    /* ... */
    s->m.mb_type = s->mb_type;
    /* ... */
    s->m.mb_mean   = (uint8_t *)  s->dummy;
    s->m.mb_var    = (uint16_t *) s->dummy;
    s->m.mc_mb_var = (uint16_t *) s->dummy;
    /* ... */
    s->m.p_mv_table = s->motion_val16[plane] + /* ... */;
    /* ... */
    s->m.me.dia_size = s->avctx->dia_size;
    /* motion-estimation pass: hand each padded 16-line strip to the mpegvideo ME code */
    for (y = 0; y < block_height; y++) {
        s->m.new_pic->data[0] = src - y * 16 * stride;
        /* ... */
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                /* ... replicate the last column ... */
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            /* ... replicate the last row ... */
        /* ... */
        for (x = 0; x < block_width; x++) {
            /* ... per-macroblock motion estimation ... */
    /* encoding pass: the same per-strip padding is repeated */
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                /* ... replicate the last column ... */
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            /* ... replicate the last row ... */
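Both passes pad each 16-line strip: rows are copied from the source plane, the last column is replicated out to 16 * block_width, and the last row is replicated down to the strip boundary, so every macroblock is fully populated even when the frame size is not a multiple of 16. A standalone sketch of that padding step (the helper and its parameters are illustrative, not the encoder's own):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Copy a width x height plane into a buffer whose dimensions are rounded up to
 * multiples of 16, replicating the last column and then the last row. */
static void pad_plane_to_16(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *src, ptrdiff_t src_stride,
                            int width, int height)
{
    int padded_w = 16 * ((width  + 15) / 16);
    int padded_h = 16 * ((height + 15) / 16);

    for (int y = 0; y < height; y++) {
        memcpy(dst + y * dst_stride, src + y * src_stride, width);
        for (int x = width; x < padded_w; x++)        /* replicate last column */
            dst[y * dst_stride + x] = dst[y * dst_stride + width - 1];
    }
    for (int y = height; y < padded_h; y++)           /* replicate last row */
        memcpy(dst + y * dst_stride, dst + (height - 1) * dst_stride, padded_w);
}

int main(void)
{
    uint8_t src[3 * 5] = {                            /* a 5x3 test plane */
         1,  2,  3,  4,  5,
         6,  7,  8,  9, 10,
        11, 12, 13, 14, 15,
    };
    uint8_t dst[16 * 16];

    pad_plane_to_16(dst, 16, src, 5, 5, 3);
    printf("dst[0][15] = %d, dst[15][15] = %d\n", dst[15], dst[15 * 16 + 15]);
    return 0;
}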
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];
            /* ... */
            uint8_t *decoded = decoded_plane + offset;
            /* ... */
            int score[4] = { 0, 0, 0, 0 }, best;
            uint8_t *temp = s->scratchbuf;
            /* ... */

            /* intra candidate: the six per-level reorder writers are set up,
             * the block is encoded, and the bit counts are recorded */
            for (i = 0; i < 6; i++)
                /* ... */
            /* ... */
            for (i = 0; i < 6; i++) {
                /* ... */
            }

            /* inter candidate: motion estimation plus half-pel compensation */
            int mx, my, pred_x, pred_y, dxy;
            /* ... */
            for (i = 0; i < 6; i++)
                /* ... */
            /* ... */
            dxy = (mx & 1) + 2 * (my & 1);
            /* ... */
            /* ... encode_block() call for the inter candidate ends with: */
                         decoded, stride, 5, 64, lambda, 0);
            best = score[1] <= score[0];
            /* ... */

            /* skip candidate: only considered for a zero motion vector */
            if (score[2] < score[best] && mx == 0 && my == 0) {
                /* ... */
            }
            /* ... */
            for (i = 0; i < 6; i++) {
                /* ... */
            }
            /* ... */
            s->rd_total += score[best];
            /* ... */
            /* copy the winning candidate's reorder buffers, in reverse level
             * order, into the main bitstream (via ff_copy_bits(), cf. below) */
            for (i = 5; i >= 0; i--)
                /* ... */
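Inside the per-macroblock loop, dxy = (mx & 1) + 2 * (my & 1) selects one of the four half-pel put_pixels variants (cf. put_pixels_tab below) for the motion-compensated candidate, and the cheapest of the three scored candidates wins; going by the SVQ1 block codes referenced below, score[0], score[1] and score[2] correspond to intra, inter and skip, with skip only taken for a zero vector. A compact sketch of that decision, using placeholder scores:

#include <stdio.h>

enum { MODE_INTRA = 0, MODE_INTER = 1, MODE_SKIP = 2 };

int main(void)
{
    /* Placeholder rate-distortion scores for one macroblock. */
    int score[3] = { 5400 /* intra */, 4100 /* inter */, 4300 /* skip */ };
    int mx = 0, my = 0;                   /* assumed motion vector */

    /* halfpel subcase: 0 = full-pel, 1 = horizontal, 2 = vertical, 3 = both */
    int dxy = (mx & 1) + 2 * (my & 1);

    int best = score[MODE_INTER] <= score[MODE_INTRA] ? MODE_INTER : MODE_INTRA;
    if (score[MODE_SKIP] < score[best] && mx == 0 && my == 0)
        best = MODE_SKIP;

    printf("dxy = %d, best mode = %d (score %d)\n", dxy, best, score[best]);
    return 0;
}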
/* ... later in the file ... */
    for (i = 0; i < 3; i++) {
        /* ... */
    }
/* from write_ident(avctx, ident): */
    int size = strlen(ident);
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    /* ... */
    /* the SVQ1 header stores 12-bit dimensions, so reject anything larger */
    if (avctx->width >= 4096 || avctx->height >= 4096) {
        /* ... */
    }
    /* ... */
    if (!s->current_picture || !s->last_picture) {
        /* ... */
    }
    /* ... */
    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;
    /* ... */
    s->m.c.avctx = avctx;
    /* ... */
    /* per-plane motion vector tables (three planes) */
    for (size_t plane = 0; plane < FF_ARRAY_ELEMS(s->motion_val16); ++plane) {
        const int shift = plane ? 2 : 0;
        unsigned block_height = ((s->frame_height >> shift) + 15U) / 16;
        unsigned block_width  = ((s->frame_width  >> shift) + 15U) / 16;
        /* ... */
        s->motion_val8[plane]  = av_calloc((2 * block_width + 1) * block_height * 2 + 2,
                                           2 * sizeof(int16_t));
        s->motion_val16[plane] = av_calloc((block_width + 1) * (block_height + 2) + 1,
                                           2 * sizeof(int16_t));
        if (!s->motion_val8[plane] || !s->motion_val16[plane])
            /* ... */
    }
    /* ... me.scratchpad allocation ends with: */
                            2 * 16 * 2 * sizeof(uint8_t));
    /* ... mb_type allocation ends with: */
                            s->y_block_height * sizeof(int16_t));
    /* ... dummy allocation ends with: */
                            s->y_block_height * sizeof(int32_t));
    /* ... */
    if (!s->m.me.scratchpad ||
        !s->mb_type || !s->dummy || !s->m.new_pic)
        /* ... */
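For each of the three planes the loop above rounds the (chroma-subsampled) dimensions up to whole macroblocks and allocates the per-8x8 and per-16x16 motion vector tables with av_calloc(), each element being a pair of int16_t (2 * sizeof(int16_t)). A worked example of the counts those expressions give for an assumed 176x144 input:

#include <stdio.h>

int main(void)
{
    const unsigned frame_width = 176, frame_height = 144;   /* assumed input */

    for (unsigned plane = 0; plane < 3; plane++) {
        const int shift = plane ? 2 : 0;                     /* chroma is /4 (4:1:0) */
        unsigned block_width  = ((frame_width  >> shift) + 15U) / 16;
        unsigned block_height = ((frame_height >> shift) + 15U) / 16;

        size_t n8  = (2 * block_width + 1) * block_height * 2 + 2;  /* motion_val8 entries  */
        size_t n16 = (block_width + 1) * (block_height + 2) + 1;    /* motion_val16 entries */

        printf("plane %u: %ux%u blocks, motion_val8 %zu MVs, motion_val16 %zu MVs\n",
               plane, block_width, block_height, n8, n16);
    }
    return 0;
}

With these assumed dimensions the luma plane is 11x9 macroblocks (416 and 133 entries) and each chroma plane is 3x3 macroblocks (44 and 21 entries).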
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    /* ... */
    /* encode the three planes; chroma is quarter size in both directions (4:1:0) */
    for (i = 0; i < 3; i++) {
        /* ... svq1_encode_plane() call with: */
                               s->last_picture->data[i],
                               s->current_picture->data[i],
                               s->frame_width  / (i ? 4 : 1),
                               s->frame_height / (i ? 4 : 1),
                               /* ... */
                               s->current_picture->linesize[i]);
        /* ... */
    }
#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE        AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
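OFFSET() and VE above are the usual idioms for building the AVOption table (options[] in the cross-reference list below): OFFSET() locates a field inside SVQ1EncContext and VE marks an option as a video encoding parameter. A hedged sketch of what an entry built with them looks like, assuming libavutil/opt.h is included; the option name, range and default here are invented for illustration and are not the encoder's real option set:

/* Illustrative only: a made-up entry showing the OFFSET()/VE pattern. */
static const AVOption options_sketch[] = {
    { "example_level", "hypothetical integer knob", OFFSET(quality),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, VE },
    { NULL },
};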
    CODEC_LONG_NAME("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),

Cross-referenced symbols (Doxygen tooltips for identifiers used above):
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
ptrdiff_t linesize[MPV_MAX_PLANES]
#define CODEC_PIXFMTS(...)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
static int put_bytes_output(const PutBitContext *s)
static av_cold int write_ident(AVCodecContext *avctx, const char *ident)
static const AVOption options[]
#define SVQ1_BLOCK_INTRA_CODE
const int8_t *const ff_svq1_inter_codebooks[6]
static int block_sum(const uint8_t *block, int w, int h, int linesize)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
void ff_me_init_pic(MPVEncContext *const s)
const int8_t *const ff_svq1_intra_codebooks[6]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
#define CANDIDATE_MB_TYPE_INTER
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
enum AVPictureType pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
enum AVPictureType pict_type
#define SVQ1_BLOCK_SKIP_CODE
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
static double sqr(double in)
int16_t encoded_block_levels[6][7][256]
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const int8_t svq1_inter_codebook_sum[4][16 *6]
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
uint32_t * mb_type
types and macros are defined in mpegutils.h
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int16_t (*motion_val[2])[2]
const uint8_t ff_svq1_intra_multistage_vlc[6][8][2]
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
const uint16_t ff_svq1_inter_mean_vlc[512][2]
AVCodec p
The public AVCodec.
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
SVQ1EncDSPContext svq1encdsp
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
#define FF_CODEC_ENCODE_CB(func)
static int put_bytes_left(const PutBitContext *s, int round_up)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static int svq1_encode_plane(SVQ1EncContext *s, int plane, PutBitContext *pb, const unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride)
MPVWorkPicture cur_pic
copy of the current picture structure.
#define SVQ1_BLOCK_SKIP_LEN
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define SVQ1_BLOCK_INTRA_LEN
#define THRESHOLD_MULTIPLIER
const FFCodec ff_svq1_encoder
static void svq1_write_header(SVQ1EncContext *s, PutBitContext *pb, int frame_type)
#define CODEC_LONG_NAME(str)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
AVFrame * current_picture
uint8_t * data[MPV_MAX_PLANES]
static void ff_svq1enc_init(SVQ1EncDSPContext *c)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
const uint8_t(* ff_h263_get_mv_penalty(void))[MAX_DMV *2+1]
static av_cold int svq1_encode_end(AVCodecContext *avctx)
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
MPVWorkPicture last_pic
copy of the previous picture structure.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
#define DECLARE_ALIGNED(n, t, v)
static int shift(int a, int b)
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
PutBitContext reorder_pb[6]
#define CANDIDATE_MB_TYPE_INTRA
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static char * split(char *message, char delim)
#define SVQ1_BLOCK_INTER_CODE
int flags
A combination of AV_PKT_FLAG values.
static void init_block_index(MpegEncContext *const s)
#define QUALITY_THRESHOLD
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define SVQ1_BLOCK_INTER_LEN
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
const char * name
Name of the codec implementation.
int16_t (*motion_val8[3])[2]
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
void * av_calloc(size_t nmemb, size_t size)
static const int8_t svq1_intra_codebook_sum[4][16 *6]
const uint8_t ff_svq1_inter_multistage_vlc[6][8][2]
int64_t frame_num
Frame counter, set by libavcodec.
#define FFSWAP(type, a, b)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
int16_t (*motion_val16[3])[2]
main external API structure.
const uint16_t ff_svq1_intra_mean_vlc[256][2]
@ AV_OPT_TYPE_INT
Underlying C type is int.
int height
picture size. must be a multiple of 16
static int ref[MAX_W *MAX_W]
static float mean(const float *input, int size)
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, unsigned level, int threshold, int lambda, int intra)
static const AVClass svq1enc_class
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
@ AV_PICTURE_TYPE_P
Predicted.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define MKTAG(a, b, c, d)
void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code)
const uint16_t ff_svq1_frame_size_table[7][2]
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
static av_cold int svq1_encode_init(AVCodecContext *avctx)
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
ptrdiff_t linesize
line size, in bytes, may be different from width
static const unsigned codebook[256][2]
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int first_slice_line
used in MPEG-4 too to handle resync markers
int mb_height
number of MBs horizontally & vertically