#include "config_components.h"
/* sse4_c: sum of squared errors over a block 4 pixels wide */
for (i = 0; i < h; i++) {
    s += sq[pix1[0] - pix2[0]];
    s += sq[pix1[1] - pix2[1]];
    s += sq[pix1[2] - pix2[2]];
    s += sq[pix1[3] - pix2[3]];

    pix1 += stride;
    pix2 += stride;
}
return s;
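This is the inner loop of sse4_c(); sq points into the 512-entry ff_square_tab offset by 256, so a pixel difference in [-255, 255] indexes its square directly. A minimal reference sketch with the lookup replaced by a multiply (the name sse4_ref is illustrative, not part of this file):

/* Illustrative sketch: same result as the table lookup above. */
static int sse4_ref(const uint8_t *pix1, const uint8_t *pix2,
                    ptrdiff_t stride, int h)
{
    int s = 0;
    for (int i = 0; i < h; i++) {
        for (int x = 0; x < 4; x++) {
            int d = pix1[x] - pix2[x];
            s += d * d;          /* == sq[d] with sq = ff_square_tab + 256 */
        }
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}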
/* sse8_c: as above, for a block 8 pixels wide */
for (i = 0; i < h; i++) {
    s += sq[pix1[0] - pix2[0]];
    s += sq[pix1[1] - pix2[1]];
    s += sq[pix1[2] - pix2[2]];
    s += sq[pix1[3] - pix2[3]];
    s += sq[pix1[4] - pix2[4]];
    s += sq[pix1[5] - pix2[5]];
    s += sq[pix1[6] - pix2[6]];
    s += sq[pix1[7] - pix2[7]];

    pix1 += stride;
    pix2 += stride;
}
return s;
/* sse16_c: as above, for a block 16 pixels wide */
for (i = 0; i < h; i++) {
    s += sq[pix1[0]  - pix2[0]];
    s += sq[pix1[1]  - pix2[1]];
    s += sq[pix1[2]  - pix2[2]];
    s += sq[pix1[3]  - pix2[3]];
    s += sq[pix1[4]  - pix2[4]];
    s += sq[pix1[5]  - pix2[5]];
    s += sq[pix1[6]  - pix2[6]];
    s += sq[pix1[7]  - pix2[7]];
    s += sq[pix1[8]  - pix2[8]];
    s += sq[pix1[9]  - pix2[9]];
    s += sq[pix1[10] - pix2[10]];
    s += sq[pix1[11] - pix2[11]];
    s += sq[pix1[12] - pix2[12]];
    s += sq[pix1[13] - pix2[13]];
    s += sq[pix1[14] - pix2[14]];
    s += sq[pix1[15] - pix2[15]];

    pix1 += stride;
    pix2 += stride;
}
return s;
/* sum_abs_dctelem_c: sum of the absolute values of all 64 coefficients */
for (i = 0; i < 64; i++)
    sum += FFABS(block[i]);
return sum;
#define avg2(a, b)       (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
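Both macros compute a rounded mean with a truncating right shift: avg2(0, 1) = (0 + 1 + 1) >> 1 = 1 and avg4(1, 0, 0, 0) = (1 + 2) >> 2 = 0, so the +1 / +2 bias rounds halves upward. Note that avg4 is a single rounded bilinear average, not two nested avg2 calls: avg2(avg2(1, 0), avg2(0, 0)) = avg2(1, 0) = 1, while avg4(1, 0, 0, 0) = 0, because nesting accumulates the rounding bias.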
/* pix_abs16_c: sum of absolute differences (SAD), 16 pixels wide */
for (i = 0; i < h; i++) {
    s += abs(pix1[0]  - pix2[0]);
    s += abs(pix1[1]  - pix2[1]);
    s += abs(pix1[2]  - pix2[2]);
    s += abs(pix1[3]  - pix2[3]);
    s += abs(pix1[4]  - pix2[4]);
    s += abs(pix1[5]  - pix2[5]);
    s += abs(pix1[6]  - pix2[6]);
    s += abs(pix1[7]  - pix2[7]);
    s += abs(pix1[8]  - pix2[8]);
    s += abs(pix1[9]  - pix2[9]);
    s += abs(pix1[10] - pix2[10]);
    s += abs(pix1[11] - pix2[11]);
    s += abs(pix1[12] - pix2[12]);
    s += abs(pix1[13] - pix2[13]);
    s += abs(pix1[14] - pix2[14]);
    s += abs(pix1[15] - pix2[15]);

    pix1 += stride;
    pix2 += stride;
}
return s;
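This is pix_abs16_c(), the plain 16-wide SAD and the workhorse metric of motion estimation: one abs() per pixel, no table, and a much weaker penalty for outliers than the SSE loops above (a single mismatch of 32 gray levels adds 32 here but 1024 to an SSE score).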
#define V(x) (pix1[x] - pix2[x])
/* pix_median_abs16_c: SAD against a median (MED) predictor; the first
 * row/column handling and the predictor expression are elided here */
for (i = 1; i < h; i++) {
    for (j = 1; j < 16; j++)
        /* ... s += abs(V(j) - median predictor); elided ... */

    pix1 += stride;
    pix2 += stride;
}
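pix_median_abs16_c() scores each difference V(j) against the median of its left, top and gradient (left + top - top-left) neighbors, the MED predictor familiar from lossless coders; the exact expression is elided in this listing. A hedged sketch of a median-of-three, assuming only its usual definition (med3 is an illustrative name, not FFmpeg's own mid_pred()):

/* Hypothetical helper: median of three values. */
static int med3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* now a <= b */
    if (b > c)  b = c;                       /* b = min(b, c) */
    return a > b ? a : b;                    /* median = max(a, min(b, c)) */
}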
/* pix_abs16_x2_c: SAD against the reference shifted half a pel to the
 * right (each reference sample is avg2 of two horizontal neighbors) */
for (i = 0; i < h; i++) {
    s += abs(pix1[0]  - avg2(pix2[0],  pix2[1]));
    s += abs(pix1[1]  - avg2(pix2[1],  pix2[2]));
    s += abs(pix1[2]  - avg2(pix2[2],  pix2[3]));
    s += abs(pix1[3]  - avg2(pix2[3],  pix2[4]));
    s += abs(pix1[4]  - avg2(pix2[4],  pix2[5]));
    s += abs(pix1[5]  - avg2(pix2[5],  pix2[6]));
    s += abs(pix1[6]  - avg2(pix2[6],  pix2[7]));
    s += abs(pix1[7]  - avg2(pix2[7],  pix2[8]));
    s += abs(pix1[8]  - avg2(pix2[8],  pix2[9]));
    s += abs(pix1[9]  - avg2(pix2[9],  pix2[10]));
    s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
    s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
    s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
    s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
    s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
    s += abs(pix1[15] - avg2(pix2[15], pix2[16]));

    pix1 += stride;
    pix2 += stride;
}
return s;
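pix_abs16_x2_c() and the y2/xy2 variants below score SAD against the reference at half-pel offsets, recreating MPEG-style half-pel interpolation on the fly with avg2()/avg4() so the estimator can evaluate sub-pel candidates without a separate interpolated plane: x2 averages horizontal neighbors, y2 vertical ones (via pix3 = pix2 + stride), and xy2 the four diagonal taps.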
/* pix_abs16_y2_c: SAD against the reference shifted half a pel down */
const uint8_t *pix3 = pix2 + stride;

for (i = 0; i < h; i++) {
    s += abs(pix1[0]  - avg2(pix2[0],  pix3[0]));
    s += abs(pix1[1]  - avg2(pix2[1],  pix3[1]));
    s += abs(pix1[2]  - avg2(pix2[2],  pix3[2]));
    s += abs(pix1[3]  - avg2(pix2[3],  pix3[3]));
    s += abs(pix1[4]  - avg2(pix2[4],  pix3[4]));
    s += abs(pix1[5]  - avg2(pix2[5],  pix3[5]));
    s += abs(pix1[6]  - avg2(pix2[6],  pix3[6]));
    s += abs(pix1[7]  - avg2(pix2[7],  pix3[7]));
    s += abs(pix1[8]  - avg2(pix2[8],  pix3[8]));
    s += abs(pix1[9]  - avg2(pix2[9],  pix3[9]));
    s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
    s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
    s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
    s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
    s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
    s += abs(pix1[15] - avg2(pix2[15], pix3[15]));

    pix1 += stride;
    pix2 += stride;
    pix3 += stride;
}
return s;
/* pix_abs16_xy2_c: SAD at the diagonal half-pel position (avg4) */
const uint8_t *pix3 = pix2 + stride;

for (i = 0; i < h; i++) {
    s += abs(pix1[0]  - avg4(pix2[0],  pix2[1],  pix3[0],  pix3[1]));
    s += abs(pix1[1]  - avg4(pix2[1],  pix2[2],  pix3[1],  pix3[2]));
    s += abs(pix1[2]  - avg4(pix2[2],  pix2[3],  pix3[2],  pix3[3]));
    s += abs(pix1[3]  - avg4(pix2[3],  pix2[4],  pix3[3],  pix3[4]));
    s += abs(pix1[4]  - avg4(pix2[4],  pix2[5],  pix3[4],  pix3[5]));
    s += abs(pix1[5]  - avg4(pix2[5],  pix2[6],  pix3[5],  pix3[6]));
    s += abs(pix1[6]  - avg4(pix2[6],  pix2[7],  pix3[6],  pix3[7]));
    s += abs(pix1[7]  - avg4(pix2[7],  pix2[8],  pix3[7],  pix3[8]));
    s += abs(pix1[8]  - avg4(pix2[8],  pix2[9],  pix3[8],  pix3[9]));
    s += abs(pix1[9]  - avg4(pix2[9],  pix2[10], pix3[9],  pix3[10]));
    s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
    s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
    s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
    s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
    s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
    s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));

    pix1 += stride;
    pix2 += stride;
    pix3 += stride;
}
return s;
/* pix_abs8_c: SAD, 8 pixels wide */
for (i = 0; i < h; i++) {
    s += abs(pix1[0] - pix2[0]);
    s += abs(pix1[1] - pix2[1]);
    s += abs(pix1[2] - pix2[2]);
    s += abs(pix1[3] - pix2[3]);
    s += abs(pix1[4] - pix2[4]);
    s += abs(pix1[5] - pix2[5]);
    s += abs(pix1[6] - pix2[6]);
    s += abs(pix1[7] - pix2[7]);

    pix1 += stride;
    pix2 += stride;
}
return s;
#define V(x) (pix1[x] - pix2[x])
/* pix_median_abs8_c: 8-wide version of the median-predictor SAD */
for (i = 1; i < h; i++) {
    for (j = 1; j < 8; j++)
        /* ... s += abs(V(j) - median predictor); elided ... */

    pix1 += stride;
    pix2 += stride;
}
/* pix_abs8_x2_c: 8-wide SAD at the horizontal half-pel position */
for (i = 0; i < h; i++) {
    s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
    s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
    s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
    s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
    s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
    s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
    s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
    s += abs(pix1[7] - avg2(pix2[7], pix2[8]));

    pix1 += stride;
    pix2 += stride;
}
return s;
/* pix_abs8_y2_c: 8-wide SAD at the vertical half-pel position */
const uint8_t *pix3 = pix2 + stride;

for (i = 0; i < h; i++) {
    s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
    s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
    s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
    s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
    s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
    s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
    s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
    s += abs(pix1[7] - avg2(pix2[7], pix3[7]));

    pix1 += stride;
    pix2 += stride;
    pix3 += stride;
}
return s;
/* pix_abs8_xy2_c: 8-wide SAD at the diagonal half-pel position */
const uint8_t *pix3 = pix2 + stride;

for (i = 0; i < h; i++) {
    s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
    s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
    s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
    s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
    s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
    s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
    s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
    s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));

    pix1 += stride;
    pix2 += stride;
    pix3 += stride;
}
return s;
/* nsse16_c: noise-preserving SSE */
int score1 = 0, score2 = 0, x, y;

for (y = 0; y < h; y++) {
    for (x = 0; x < 16; x++)
        score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
    if (y + 1 < h) {
        for (x = 0; x < 15; x++)
            score2 += FFABS(s1[x] - s1[x + stride] -
                            s1[x + 1] + s1[x + stride + 1]) -
                      FFABS(s2[x] - s2[x + stride] -
                            s2[x + 1] + s2[x + stride + 1]);
    }
    s1 += stride;
    s2 += stride;
}

if (c)
    return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else
    return score1 + FFABS(score2) * 8;
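nsse16_c() implements FF_CMP_NSSE: score1 is the ordinary squared error, while score2 compares the 2x2 second-difference (local texture) energy of the two blocks, so candidates that smooth away grain and noise are penalized even when their plain SSE is low. The weight comes from AVCodecContext.nsse_weight when a context is available and defaults to 8 otherwise.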
/* nsse8_c: 8-wide version of the above */
int score1 = 0, score2 = 0, x, y;

for (y = 0; y < h; y++) {
    for (x = 0; x < 8; x++)
        score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
    if (y + 1 < h) {
        for (x = 0; x < 7; x++)
            score2 += FFABS(s1[x] - s1[x + stride] -
                            s1[x + 1] + s1[x + stride + 1]) -
                      FFABS(s2[x] - s2[x + stride] -
                            s2[x + 1] + s2[x + stride + 1]);
    }
    s1 += stride;
    s2 += stride;
}

if (c)
    return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else
    return score1 + FFABS(score2) * 8;
#define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY)            \
    [FF_CMP_ ## CMP_FLAG] = {                          \
        .offset   = offsetof(MECmpContext, ARRAY),     \
        .mpv_only = MPVENC_ONLY,                       \
        /* ... */                                      \
    }

static const struct {
    /* ... the offset of the function array within MECmpContext plus an
     * encoder-only flag; member declarations elided ... */
} cmp_func_list[] = {
    ENTRY(SATD,       hadamard8_diff, 0),
    ENTRY(DCT,        dct_sad,        1),
    ENTRY(PSNR,       quant_psnr,     1),
    /* ... */
    ENTRY(NSSE,       nsse,           0),
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    /* ... wavelet comparators, elided ... */
#endif
    ENTRY(DCTMAX,     dct_max,        1),
    /* ... */
    ENTRY(DCT264,     dct264_sad,     1),
    /* ... */
    ENTRY(MEDIAN_SAD, median_sad,     0),
};
/* ff_set_cmp(): copy the six comparators selected by type into cmp[] */
for (int i = 0; i < 6; i++)
    cmp[i] = zero_cmp;                 /* FF_CMP_ZERO short-circuit */
/* ... range and availability checks, elided ... */
if (/* ... */
    !mpvenc && cmp_func_list[type].mpv_only) {
    av_log(NULL, AV_LOG_ERROR,
           "invalid cmp function selection\n");
    return AVERROR(EINVAL);
}
me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
for (int i = 0; i < 6; i++)
    cmp[i] = me_cmp_func_array[i];
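A minimal sketch of the calling side, assuming only the ff_me_cmp_init()/ff_set_cmp() signatures used in this file; the variable names and the surrounding encoder state are illustrative:

/* Illustrative sketch (not from this file): select and call the SAD
 * comparators.  avctx, cur, ref and linesize come from the caller. */
MECmpContext mecc;
me_cmp_func sad_fn[6];
int score;

ff_me_cmp_init(&mecc, avctx);
if (ff_set_cmp(&mecc, sad_fn, FF_CMP_SAD, 0) < 0)
    return AVERROR(EINVAL);            /* unknown metric or mpvenc-only */

/* index 0 scores 16-pixel-wide blocks, index 1 8-pixel-wide ones;
 * the context argument is unused by the plain SAD comparators */
score = sad_fn[0](NULL, cur, ref, linesize, 16);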
#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2);              \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y) \
    /* ... in-place variant of BUTTERFLY2, body elided ... */

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
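These butterflies implement the 8-point Hadamard transform behind FF_CMP_SATD; BUTTERFLYA fuses the final butterfly stage with the absolute-value accumulation. A sketch of the same pattern at 4-point scale, reusing the macros above (satd4 is an illustrative name, not part of this file):

/* Illustrative sketch: 4-point Hadamard SATD of a difference row. */
static int satd4(const int d[4])
{
    int t0, t1, t2, t3;
    BUTTERFLY2(t0, t1, d[0], d[1]);    /* stage 1 */
    BUTTERFLY2(t2, t3, d[2], d[3]);
    return BUTTERFLYA(t0, t2) +        /* stage 2 fused with the |.| sum */
           BUTTERFLYA(t1, t3);
}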
/* hadamard8_diff8x8_c: SATD of the dst/src difference block */
int i, temp[64], sum = 0;

for (i = 0; i < 8; i++) {
    /* ... horizontal butterfly passes over the difference rows ... */
}
for (i = 0; i < 8; i++) {
    /* ... vertical passes; BUTTERFLYA accumulates |coefficients| ... */
}

/* hadamard8_intra8x8_c: the same transform on the source block alone */
int i, temp[64], sum = 0;

for (i = 0; i < 8; i++) {
    /* ... */
}
for (i = 0; i < 8; i++) {
    /* ... */
}

/* dct_sad8x8_c: a real DCT of the difference block, then the sum of
 * absolute coefficients */
return s->sum_abs_dctelem(temp);
#define DCT8_1D(src, srcstride, dst, dststride) {      \
    const int s07 = SRC(0) + SRC(7);                   \
    const int s16 = SRC(1) + SRC(6);                   \
    const int s25 = SRC(2) + SRC(5);                   \
    const int s34 = SRC(3) + SRC(4);                   \
    const int a0  = s07 + s34;                         \
    const int a1  = s16 + s25;                         \
    const int a2  = s07 - s34;                         \
    const int a3  = s16 - s25;                         \
    const int d07 = SRC(0) - SRC(7);                   \
    const int d16 = SRC(1) - SRC(6);                   \
    const int d25 = SRC(2) - SRC(5);                   \
    const int d34 = SRC(3) - SRC(4);                   \
    const int a4  = d16 + d25 + (d07 + (d07 >> 1));    \
    const int a5  = d07 - d34 - (d25 + (d25 >> 1));    \
    const int a6  = d07 + d34 - (d16 + (d16 >> 1));    \
    const int a7  = d16 - d25 + (d34 + (d34 >> 1));    \
    DST(0, a0 + a1);                                   \
    DST(1, a4 + (a7 >> 2));                            \
    DST(2, a2 + (a3 >> 1));                            \
    DST(3, a5 + (a6 >> 2));                            \
    DST(4, a0 - a1);                                   \
    DST(5, a6 - (a5 >> 2));                            \
    DST(6, (a2 >> 1) - a3);                            \
    DST(7, (a4 >> 2) - a7);                            \
}
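This is the H.264-style integer 8-point DCT: the d + (d >> 1) terms are multiplications by 1.5 and the >> 1 / >> 2 terms the 0.5 / 0.25 factors of the transform matrix, so a full 1-D pass needs only additions and shifts. The FF_CMP_DCT264 metric below runs it over the rows and columns of the difference block and sums coefficient magnitudes.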
/* dct264_sad8x8_c: run DCT8_1D over the rows, then redefine SRC/DST so
 * the column pass accumulates |coefficients| directly */
#define SRC(x)    dct[i][x]
#define DST(x, v) dct[i][x] = v
for (i = 0; i < 8; i++)
    /* ... DCT8_1D row pass, elided ... */

/* ... SRC redefined for column access ... */
#define DST(x, v) sum += FFABS(v)
for (i = 0; i < 8; i++)
    /* ... DCT8_1D column pass, elided ... */
/* dct_max8x8_c: the largest |DCT coefficient| instead of their sum */
for (i = 0; i < 64; i++)
    sum = FFMAX(sum, FFABS(temp[i]));
/* quant_psnr8x8_c: the error introduced by quantize/dequantize alone */
int16_t *const bak = temp + 64;
/* ... */
memcpy(bak, temp, 64 * sizeof(int16_t));

s->c.block_last_index[0] =
    s->dct_quantize(s, temp, 0, s->c.qscale, &i);
s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
/* ... inverse transform, elided ... */
for (i = 0; i < 64; i++)
    sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
/* rd8x8_c: true rate-distortion score of an 8x8 block: quantize the
 * difference, count the VLC bits to code it, reconstruct, measure SSE */
const uint8_t *scantable = s->c.intra_scantable.permutated;
/* ... local copies lsrc1/lsrc2 of the input blocks, elided ... */
const int esc_length = s->ac_esc_length;
const uint8_t *length, *last_length;

s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

s->c.block_last_index[0] =
    last =
    s->dct_quantize(s, temp, 0, s->c.qscale, &i);

if (s->c.mb_intra) {
    length      = s->intra_ac_vlc_length;
    last_length = s->intra_ac_vlc_last_length;
    bits       += s->luma_dc_vlc_length[temp[0] + 256];
} else {
    length      = s->inter_ac_vlc_length;
    last_length = s->inter_ac_vlc_last_length;
}

if (last >= start_i) {
    for (i = start_i; i < last; i++) {
        int j = scantable[i];
        /* ... level/run bookkeeping elided ... */
        if ((level & (~127)) == 0)
            bits += length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }
    /* ... the final coefficient is costed from last_length[]: */
    if ((level & (~127)) == 0) {
        bits += last_length[UNI_AC_ENC_INDEX(run, level)];
    } else
        bits += esc_length;
}

if (s->c.mb_intra)
    s->c.dct_unquantize_intra(&s->c, temp, 0, s->c.qscale);
else
    s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
/* ... */
s->c.idsp.idct_add(lsrc2, 8, temp);

distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);

return distortion + ((bits * s->c.qscale * s->c.qscale * 109 + 64) >> 7);
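The final line converts the bit count into the distortion domain: bits * qscale^2 * 109 / 128, i.e. roughly 0.85 * Q^2 * bits, the classic MPEG-style rate-distortion lambda, with the +64 rounding the 7-bit shift to nearest.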
/* bit8x8_c: like rd8x8_c but returns only the bit cost, no distortion */
const uint8_t *scantable = s->c.intra_scantable.permutated;
/* ... */
const int esc_length = s->ac_esc_length;
const uint8_t *length, *last_length;

s->c.block_last_index[0] =
    last =
    s->dct_quantize(s, temp, 0, s->c.qscale, &i);

if (s->c.mb_intra) {
    length      = s->intra_ac_vlc_length;
    last_length = s->intra_ac_vlc_last_length;
    bits       += s->luma_dc_vlc_length[temp[0] + 256];
} else {
    length      = s->inter_ac_vlc_length;
    last_length = s->inter_ac_vlc_last_length;
}

if (last >= start_i) {
    for (i = start_i; i < last; i++) {
        int j = scantable[i];
        /* ... level/run bookkeeping elided ... */
        if ((level & (~127)) == 0)
            bits += length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }
    if ((level & (~127)) == 0)
        bits += last_length[UNI_AC_ENC_INDEX(run, level)];
    else
        bits += esc_length;
}
#define VSAD_INTRA(size)                                                    \
static int vsad_intra ## size ## _c(MPVEncContext *unused,                  \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
                                                                            \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += FFABS(s[x]     - s[x + stride])     +                  \
                     FFABS(s[x + 1] - s[x + stride + 1]) +                  \
                     FFABS(s[x + 2] - s[x + 2 + stride]) +                  \
                     FFABS(s[x + 3] - s[x + 3 + stride]);                   \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
    return score;                                                           \
}
#define VSAD(size)                                                           \
static int vsad ## size ## _c(MPVEncContext *unused,                         \
                              const uint8_t *s1, const uint8_t *s2,          \
                              ptrdiff_t stride, int h)                       \
{                                                                            \
    int score = 0, x, y;                                                     \
    for (y = 1; y < h; y++) {                                                \
        for (x = 0; x < size; x++)                                           \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; s2 += stride;                                          \
    }                                                                        \
    return score;                                                            \
}
#define SQ(a) ((a) * (a))

#define VSSE_INTRA(size)                                                    \
static int vsse_intra ## size ## _c(MPVEncContext *unused,                  \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h)                \
{                                                                           \
    int score = 0, x, y;                                                    \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x += 4) {                                     \
            score += SQ(s[x]     - s[x + stride])     +                     \
                     SQ(s[x + 1] - s[x + stride + 1]) +                     \
                     SQ(s[x + 2] - s[x + stride + 2]) +                     \
                     SQ(s[x + 3] - s[x + stride + 3]);                      \
        }                                                                   \
        s += stride;                                                        \
    }                                                                       \
    return score;                                                           \
}
#define VSSE(size)                                                          \
static int vsse ## size ## _c(MPVEncContext *unused, const uint8_t *s1,     \
                              const uint8_t *s2, ptrdiff_t stride, int h)   \
{                                                                           \
    int score = 0, x, y;                                                    \
    for (y = 1; y < h; y++) {                                               \
        for (x = 0; x < size; x++)                                          \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);   \
        s1 += stride; s2 += stride;                                         \
    }                                                                       \
    return score;                                                           \
}
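The vsad/vsse macros measure vertical activity: the inter forms accumulate the row-to-row change of the difference signal s1 - s2 (an interlace and noise indicator), while the _INTRA forms apply the same measure to the source block alone. The file instantiates each macro for sizes 8 and 16, producing the vsad8_c/vsad16_c and vsse8_c/vsse16_c families wired up in ff_me_cmp_init() below.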
#define WRAPPER8_16_SQ(name8, name16)                            \
static int name16(MPVEncContext *const s, const uint8_t *dst,    \
                  const uint8_t *src, ptrdiff_t stride, int h)   \
{                                                                \
    int score = 0;                                               \
    score += name8(s, dst,     src,     stride, 8);              \
    score += name8(s, dst + 8, src + 8, stride, 8);              \
    if (h == 16) {                                               \
        dst += 8 * stride; src += 8 * stride;                    \
        score += name8(s, dst,     src,     stride, 8);          \
        score += name8(s, dst + 8, src + 8, stride, 8);          \
    }                                                            \
    return score;                                                \
}
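The wrapper assembles a 16x16 score from four 8x8 calls: the left and right halves of the top band, then the bottom pair when h == 16. This is how the inherently 8x8 transform metrics get their 16x16 entries; the 16x16 variants referenced in ff_me_cmp_init() below are presumably generated this way, e.g.:

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)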
/* ff_me_cmp_init: install the C implementations into the context */
memset(c, 0, sizeof(*c));

#define SET_CMP_FUNC(name)          \
    c->name[0] = name ## 16_c;      \
    c->name[1] = name ## 8x8_c;

/* ... SET_CMP_FUNC applied to the transform-based metrics, elided ... */
c->hadamard8_diff[4] = hadamard8_intra16_c;
/* ... */
c->vsad[0] = vsad16_c;
c->vsad[1] = vsad8_c;
c->vsad[4] = vsad_intra16_c;
c->vsad[5] = vsad_intra8_c;
c->vsse[0] = vsse16_c;
c->vsse[1] = vsse8_c;
c->vsse[4] = vsse_intra16_c;
c->vsse[5] = vsse_intra8_c;
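The index convention visible here: slot [0] of each array holds the 16-pixel-wide version of a metric, [1] the 8-pixel-wide one, and [4]/[5] the intra (single-block) variants. The conditional block that follows presumably guards the wavelet comparators used by Snow.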
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    /* ... */
#endif