#define DEFINE_FN(op, size, insn)                                                 \
static void op##_rv40_qpel##size##_mc33_##insn(uint8_t *dst, const uint8_t *src,  \
                                               ptrdiff_t stride)                  \
{                                                                                  \
    ff_##op##_pixels##size##_xy2_##insn(dst, src, stride, size);                  \
}

void ff_put_rv40_chroma_mc8_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);

void ff_put_rv40_chroma_mc4_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
#define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmxext)
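/*
 * Illustration (not part of the original listing): DEFINE_FN only does token
 * pasting, so an invocation such as DEFINE_FN(avg, 16, mmxext), assuming the
 * file instantiates that combination, would expand to a thin wrapper that maps
 * the (3/4, 3/4) qpel position onto the half-pel "xy2" helper:
 *
 *     static void avg_rv40_qpel16_mc33_mmxext(uint8_t *dst, const uint8_t *src,
 *                                             ptrdiff_t stride)
 *     {
 *         ff_avg_pixels16_xy2_mmxext(dst, src, stride, 16);
 *     }
 */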
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT)                           \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst,  \
                                                         const uint8_t *src, \
                                                         ptrdiff_t stride)   \
{                                                                       \
    int i;                                                              \
    if (PH && PV) {                                                     \
        LOCAL_ALIGNED(16, uint8_t, tmp, [SIZE * (SIZE + 5)]);           \
        uint8_t *tmpptr = tmp + SIZE * 2;                               \
        src -= stride * 2;                                              \
                                                                        \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride,    \
                                     SIZE + 5, HCOFF(PH));              \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i,   \
                                         SIZE, SIZE, VCOFF(PV));        \
    } else if (PV) {                                                    \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, VCOFF(PV));     \
    } else {                                                            \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, HCOFF(PH));     \
    }                                                                   \
}

#define QPEL_FUNCS_DECL(OP, PH, PV, OPT)  \
    QPEL_FUNC_DECL(OP, 8,  PH, PV, OPT)   \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)

#define QPEL_MC_DECL(OP, OPT)                                           \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride,    \
                                  const uint8_t *src,                   \
                                  ptrdiff_t srcStride,                  \
                                  int len, int m);                      \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride,    \
                                  const uint8_t *src,                   \
                                  ptrdiff_t srcStride,                  \
                                  int len, int m);                      \
QPEL_FUNCS_DECL(OP, 0, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 0, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 0, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 2, OPT)                                          \
QPEL_FUNCS_DECL(OP, 1, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 2, OPT)                                          \
QPEL_FUNCS_DECL(OP, 2, 3, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 0, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 1, OPT)                                          \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)

#define HCOFF(x) (32 * ((x) - 1))
#define VCOFF(x) (32 * ((x) - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)
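/*
 * Note (explanatory, not in the original listing): each generated
 * OP rv40_qpelSIZE_mcPHPV function is a wrapper around the assembly row and
 * column filters. When both PH and PV are non-zero it first runs the
 * horizontal filter over SIZE + 5 rows into the on-stack tmp buffer (the
 * extra rows are the vertical filter's top/bottom support), then the vertical
 * filter from tmp into dst; otherwise a single pass suffices. LOOPSIZE is the
 * number of columns the assembly handles per call and is defined elsewhere in
 * the file (not shown in this listing). HCOFF/VCOFF select the coefficient
 * set for a given qpel phase; for the SSSE3 routines the phases are 32 apart,
 * so HCOFF(1) = 0, HCOFF(2) = 32, HCOFF(3) = 64.
 */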
#define HCOFF(x) (64 * ((x) - 1))
#define VCOFF(x) (64 * ((x) - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)
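/*
 * Note (explanatory, not in the original listing): the SSE2 and MMX paths
 * redefine HCOFF/VCOFF with a step of 64 per qpel phase instead of the SSSE3
 * step of 32, i.e. HCOFF(1) = 0, HCOFF(2) = 64, HCOFF(3) = 128, presumably
 * because that assembly spaces its filter-coefficient rows differently.
 */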
#define HCOFF(x) (64 * ((x) - 1))
#define VCOFF(x) (64 * ((x) - 1))

QPEL_MC_DECL(put_, _mmx)
#define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmxext)
#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
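/*
 * Note (explanatory, not in the original listing): these #defines alias the
 * mmxext/3dnow "put" row and column filters back to the MMX ones, which
 * suggests the assembly only provides MMX versions of the put filters.
 * QPEL_FUNC_DECL always calls ff_put_rv40_qpel_h##OPT for the intermediate
 * horizontal pass, so with OPT set to _mmxext or _3dnow that call resolves to
 * the MMX routine; the averaging itself is done by the ff_avg_rv40_qpel_h/v
 * call of the final pass.
 */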
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT)                            \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;

#define QPEL_FUNCS_SET(OP, PH, PV, OPT)  \
    QPEL_FUNC_SET(OP, 8,  PH, PV, OPT)   \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)

#define QPEL_MC_SET(OP, OPT)   \
QPEL_FUNCS_SET (OP, 0, 1, OPT) \
QPEL_FUNCS_SET (OP, 0, 3, OPT) \
QPEL_FUNCS_SET (OP, 1, 0, OPT) \
QPEL_FUNCS_SET (OP, 1, 1, OPT) \
QPEL_FUNCS_SET (OP, 1, 2, OPT) \
QPEL_FUNCS_SET (OP, 1, 3, OPT) \
QPEL_FUNCS_SET (OP, 2, 1, OPT) \
QPEL_FUNCS_SET (OP, 2, 2, OPT) \
QPEL_FUNCS_SET (OP, 2, 3, OPT) \
QPEL_FUNCS_SET (OP, 3, 0, OPT) \
QPEL_FUNCS_SET (OP, 3, 1, OPT) \
QPEL_FUNCS_SET (OP, 3, 2, OPT)

QPEL_MC_SET(put_, _mmx)
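/*
 * Illustration (not part of the original listing): QPEL_FUNC_SET maps a
 * (PH, PV) qpel position to a slot in the put/avg pixel tables. The first
 * index is 2 - SIZE / 8, so the 16x16 functions land at index 0 and the 8x8
 * ones at index 1; the second index is 4 * PV + PH. For example,
 * QPEL_FUNCS_SET(put_, 1, 2, _ssse3) expands to roughly:
 *
 *     c->put_pixels_tab[1][4 * 2 + 1] = put_rv40_qpel8_mc12_ssse3;
 *     c->put_pixels_tab[0][4 * 2 + 1] = put_rv40_qpel16_mc12_ssse3;
 */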
QPEL_MC_SET(avg_, _3dnow)
QPEL_MC_SET(avg_, _mmxext)
QPEL_MC_SET(put_, _sse2)
QPEL_MC_SET(avg_, _sse2)
QPEL_MC_SET(put_, _ssse3)
QPEL_MC_SET(avg_, _ssse3)
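/*
 * Illustration (assumed wiring, not shown in this listing): the QPEL_MC_SET
 * invocations above belong to the body of ff_rv40dsp_init_x86(), where each
 * group is expected to sit behind the usual EXTERNAL_* CPU-flag checks from
 * libavutil/x86/cpu.h, roughly like:
 *
 *     av_cold void ff_rv40dsp_init_x86(RV34DSPContext *c)
 *     {
 *         int cpu_flags = av_get_cpu_flags();
 *
 *         if (EXTERNAL_SSE2(cpu_flags)) {
 *             QPEL_MC_SET(put_, _sse2)
 *             QPEL_MC_SET(avg_, _sse2)
 *         }
 *         if (EXTERNAL_SSSE3(cpu_flags)) {
 *             QPEL_MC_SET(put_, _ssse3)
 *             QPEL_MC_SET(avg_, _ssse3)
 *         }
 *     }
 */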