/*
 * Accuracy/scaling constants for the inverse DCT (row and column passes).
 * NOTE(review): this chunk is extraction-mangled; original newlines were
 * restored here and embedded extractor line numbers dropped. No code
 * tokens were changed.
 */
#define BITS_INV_ACC    5                        // 4 or 5 for IEEE
#define SHIFT_INV_ROW   (16 - BITS_INV_ACC)      //11
#define SHIFT_INV_COL   (1 + BITS_INV_ACC)       //6
#define RND_INV_ROW     (1024 * (6 - BITS_INV_ACC))
#define RND_INV_COL     (16 * (BITS_INV_ACC - 3))
#define RND_INV_CORR    (RND_INV_COL - 1)        /* rounding correction derived from the column rounder */

/* Accuracy/scaling constants for the forward DCT. */
#define BITS_FRW_ACC    3                        // 2 or 3 for accuracy
#define SHIFT_FRW_COL   BITS_FRW_ACC
#define SHIFT_FRW_ROW   (BITS_FRW_ACC + 17)
#define RND_FRW_ROW     (262144*(BITS_FRW_ACC - 1))

/*
 * NOTE(review): the declaration header for the following 16 int16 values
 * (and an intervening rounder table) was lost in extraction. Per the
 * symbol index later in this file it is presumably
 * "static const int16_t tg_1_16[4*4]" via DECLARE_ALIGNED: four 16-bit
 * constants, each replicated 4x for SIMD use -- TODO confirm upstream.
 */
    13036, 13036, 13036, 13036,
    27146, 27146, 27146, 27146,
   -21746,-21746,-21746,-21746,
    23170, 23170, 23170, 23170
/*
 * 128 int16 coefficients (32*4) -- the count matches the
 * "tab_i_04_mmi[32*4]" entry in this file's symbol index; the declaration
 * header itself was lost in extraction -- TODO confirm upstream.
 * Four 8-line groups; each line is a 4-halfword operand consumed by the
 * pmaddhw steps of DCT_8_INV_ROW_MMI below (the macro reads offsets
 * 0..56 from its A3 table argument). Presumably one group per iDCT
 * row-pair (0/4, 1/7, 2/6, 3/5) -- NOTE(review): verify grouping.
 */
/* group 1 */
    16384, 21407, 16384, 8867,
    16384, 8867,-16384,-21407,
    16384, -8867, 16384,-21407,
   -16384, 21407, 16384, -8867,
    22725, 19266, 19266, -4520,
    12873, 4520,-22725,-12873,
    12873,-22725, 4520,-12873,
    4520, 19266, 19266,-22725,
/* group 2 */
    22725, 29692, 22725, 12299,
    22725, 12299,-22725,-29692,
    22725,-12299, 22725,-29692,
   -22725, 29692, 22725,-12299,
    31521, 26722, 26722, -6270,
    17855, 6270,-31521,-17855,
    17855,-31521, 6270,-17855,
    6270, 26722, 26722,-31521,
/* group 3 */
    21407, 27969, 21407, 11585,
    21407, 11585,-21407,-27969,
    21407,-11585, 21407,-27969,
   -21407, 27969, 21407,-11585,
    29692, 25172, 25172, -5906,
    16819, 5906,-29692,-16819,
    16819,-29692, 5906,-16819,
    5906, 25172, 25172,-29692,
/* group 4 */
    19266, 25172, 19266, 10426,
    19266, 10426,-19266,-25172,
    19266,-10426, 19266,-25172,
   -19266, 25172, 19266,-10426,
    26722, 22654, 22654, -5315,
    15137, 5315,-26722,-15137,
    15137,-26722, 5315,-15137,
    5315, 22654, 22654,-26722,
/*
 * One 8-point inverse-DCT row pass in Loongson MMI inline asm.
 *   A1 = source row (memory operand spelling, e.g. 0(%0))
 *   A2 = destination row
 *   A3 = coefficient table (one of the tab_i_* groups above; offsets
 *        0..56 are read from it)
 *   A4 = rounder constant
 * Arguments are stringized (#A1 etc.), so they must be literal asm
 * memory-operand spellings. Clobbers $f0-$f18 and GPR $10.
 *
 * NOTE(review): a "dli $10, 11" was evidently lost in extraction (the
 * embedded numbering jumps over one line, and without it the dmtc1 below
 * would reuse 0xdd as the psraw shift count). Restored here;
 * 11 == SHIFT_INV_ROW per the defines above -- confirm against upstream.
 */
#define DCT_8_INV_ROW_MMI(A1, A2, A3, A4) \
    "dli $10, 0x88              \n\t" \
    "ldc1 $f4, "#A1"            \n\t" \
    "dmtc1 $10, $f16            \n\t" /* $f16 = shuffle immediate 0x88 */ \
    "ldc1 $f10, 8+"#A1"         \n\t" \
    "ldc1 $f6, "#A3"            \n\t" \
    "pshufh $f0, $f4, $f16      \n\t" \
    "ldc1 $f8, 8+"#A3"          \n\t" \
    "ldc1 $f12, 32+"#A3"        \n\t" \
    "pmaddhw $f6, $f6, $f0      \n\t" \
    "dli $10, 0xdd              \n\t" \
    "pshufh $f2, $f10, $f16     \n\t" \
    "dmtc1 $10, $f16            \n\t" /* $f16 = shuffle immediate 0xdd */ \
    "pmaddhw $f8, $f8, $f2      \n\t" \
    "ldc1 $f14, 40+"#A3"        \n\t" \
    "pshufh $f4, $f4, $f16      \n\t" \
    "pmaddhw $f12, $f12, $f4    \n\t" \
    "pshufh $f10, $f10, $f16    \n\t" \
    "ldc1 $f18, "#A4"           \n\t" /* rounder */ \
    "pmaddhw $f14, $f14, $f10   \n\t" \
    "paddw $f6, $f6, $f18       \n\t" \
    "ldc1 $f16, 16+"#A3"        \n\t" \
    "pmaddhw $f0, $f0, $f16     \n\t" \
    "ldc1 $f16, 24+"#A3"        \n\t" \
    "paddw $f6, $f6, $f8        \n\t" \
    "pmaddhw $f2, $f2, $f16     \n\t" \
    "ldc1 $f16, 48+"#A3"        \n\t" \
    "pmaddhw $f4, $f4, $f16     \n\t" \
    "ldc1 $f16, 56+"#A3"        \n\t" \
    "paddw $f12, $f12, $f14     \n\t" \
    "dli $10, 11                \n\t" /* restored: 11 == SHIFT_INV_ROW */ \
    "pmaddhw $f10, $f10, $f16   \n\t" \
    "dmtc1 $10, $f16            \n\t" /* $f16 = row shift count */ \
    "psubw $f8, $f6, $f12       \n\t" \
    "paddw $f6, $f6, $f12       \n\t" \
    "paddw $f0, $f0, $f18       \n\t" \
    "psraw $f6, $f6, $f16       \n\t" \
    "paddw $f0, $f0, $f2        \n\t" \
    "paddw $f4, $f4, $f10       \n\t" \
    "psraw $f8, $f8, $f16       \n\t" \
    "psubw $f14, $f0, $f4       \n\t" \
    "paddw $f0, $f0, $f4        \n\t" \
    "psraw $f0, $f0, $f16       \n\t" \
    "psraw $f14, $f14, $f16     \n\t" \
    "dli $10, 0xb1              \n\t" \
    "packsswh $f6, $f6, $f0     \n\t" \
    "dmtc1 $10, $f16            \n\t" /* $f16 = shuffle immediate 0xb1 */ \
    "packsswh $f14, $f14, $f8   \n\t" \
    "sdc1 $f6, "#A2"            \n\t" \
    "pshufh $f14, $f14, $f16    \n\t" \
    "sdc1 $f14, 8+"#A2"         \n\t"

/*
 * One 8-point inverse-DCT column pass in Loongson MMI inline asm.
 *   A1 = source block (reads at 16-byte row strides, offsets 0..7*16)
 *   A2 = destination block (same layout)
 * Also reads constants through asm operand %3 of the enclosing asm
 * statement (a constant table base; not visible in this chunk --
 * NOTE(review): confirm which table %3 binds to).
 * Clobbers $f0-$f16 and GPR $10.
 *
 * NOTE(review): a "dli $10, 6" was evidently lost in extraction (the
 * embedded numbering skips one line, and nothing else sets $10 before
 * the dmtc1 that feeds the psrah shifts). Restored here;
 * 6 == SHIFT_INV_COL per the defines above -- confirm against upstream.
 */
#define DCT_8_INV_COL(A1, A2) \
    "ldc1 $f2, 2*8(%3)          \n\t" \
    "ldc1 $f6, 16*3+"#A1"       \n\t" \
    "ldc1 $f10, 16*5+"#A1"      \n\t" \
    "pmulhh $f0, $f2, $f6       \n\t" \
    "ldc1 $f4, 0(%3)            \n\t" \
    "pmulhh $f2, $f2, $f10      \n\t" \
    "ldc1 $f14, 16*7+"#A1"      \n\t" \
    "ldc1 $f12, 16*1+"#A1"      \n\t" \
    "pmulhh $f8, $f4, $f14      \n\t" \
    "paddsh $f0, $f0, $f6       \n\t" \
    "pmulhh $f4, $f4, $f12      \n\t" \
    "paddsh $f2, $f2, $f6       \n\t" \
    "psubsh $f0, $f0, $f10      \n\t" \
    "ldc1 $f6, 3*8(%3)          \n\t" \
    "paddsh $f2, $f2, $f10      \n\t" \
    "paddsh $f8, $f8, $f12      \n\t" \
    "psubsh $f4, $f4, $f14      \n\t" \
    "paddsh $f10, $f8, $f2      \n\t" \
    "psubsh $f12, $f4, $f0      \n\t" \
    "psubsh $f8, $f8, $f2       \n\t" \
    "paddsh $f4, $f4, $f0       \n\t" \
    "ldc1 $f14, 1*8(%3)         \n\t" \
    "sdc1 $f10, 3*16+"#A2"      \n\t" \
    "paddsh $f2, $f8, $f4       \n\t" \
    "sdc1 $f12, 5*16+"#A2"      \n\t" \
    "psubsh $f8, $f8, $f4       \n\t" \
    "ldc1 $f10, 2*16+"#A1"      \n\t" \
    "ldc1 $f12, 6*16+"#A1"      \n\t" \
    "pmulhh $f0, $f14, $f10     \n\t" \
    "pmulhh $f14, $f14, $f12    \n\t" \
    "pmulhh $f2, $f2, $f6       \n\t" \
    "ldc1 $f4, 0*16+"#A1"       \n\t" \
    "pmulhh $f8, $f8, $f6       \n\t" \
    "psubsh $f0, $f0, $f12      \n\t" \
    "ldc1 $f12, 4*16+"#A1"      \n\t" \
    "paddsh $f14, $f14, $f10    \n\t" \
    "psubsh $f6, $f4, $f12      \n\t" \
    "paddsh $f4, $f4, $f12      \n\t" \
    "paddsh $f10, $f4, $f14     \n\t" \
    "psubsh $f12, $f6, $f0      \n\t" \
    "psubsh $f4, $f4, $f14      \n\t" \
    "paddsh $f6, $f6, $f0       \n\t" \
    "paddsh $f2, $f2, $f2       \n\t" \
    "paddsh $f8, $f8, $f8       \n\t" \
    "psubsh $f14, $f6, $f2      \n\t" \
    "dli $10, 6                 \n\t" /* restored: 6 == SHIFT_INV_COL */ \
    "paddsh $f6, $f6, $f2       \n\t" \
    "dmtc1 $10, $f16            \n\t" /* $f16 = column shift count */ \
    "psubsh $f0, $f12, $f8      \n\t" \
    "paddsh $f12, $f12, $f8     \n\t" \
    "psrah $f6, $f6, $f16       \n\t" \
    "psrah $f12, $f12, $f16     \n\t" \
    "ldc1 $f2, 3*16+"#A2"       \n\t" \
    "psrah $f14, $f14, $f16     \n\t" \
    "psrah $f0, $f0, $f16       \n\t" \
    "sdc1 $f6, 1*16+"#A2"       \n\t" \
    "psubsh $f8, $f10, $f2      \n\t" \
    "paddsh $f10, $f10, $f2     \n\t" \
    "sdc1 $f12, 2*16+"#A2"      \n\t" \
    "ldc1 $f6, 5*16+"#A2"       \n\t" \
    "psrah $f10, $f10, $f16     \n\t" \
    "psrah $f8, $f8, $f16       \n\t" \
    "sdc1 $f0, 5*16+"#A2"       \n\t" \
    "psubsh $f12, $f4, $f6      \n\t" \
    "paddsh $f4, $f4, $f6       \n\t" \
    "sdc1 $f14, 6*16+"#A2"      \n\t" \
    "sdc1 $f10, 0*16+"#A2"      \n\t" \
    "psrah $f4, $f4, $f16       \n\t" \
    "sdc1 $f8, 7*16+"#A2"       \n\t" \
    "psrah $f12, $f12, $f16     \n\t" \
    "sdc1 $f4, 3*16+"#A2"       \n\t" \
    "sdc1 $f12, 4*16+"#A2"      \n\t"

/*
 * NOTE(review): the following declaration stub lost its initializer and
 * terminating ';' in extraction, so it is kept as a comment to avoid a
 * syntax error; presumably rounder_0 holds the per-row rounding
 * constants passed as A4 above -- confirm against upstream:
 *     static const int32_t rounder_0[2 * 8] = { ... };
 */
static const int16_t tab_i_04_mmi[32 *4]
void ff_add_pixels_clamped_mmi(const int16_t *block, uint8_t *av_restrict pixels, ptrdiff_t line_size)
The exact code generated depends on how similar the blocks are and on how closely they are related to each other.
void ff_xvid_idct_put_mmi(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
void ff_xvid_idct_add_mmi(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
static const int16_t tg_1_16[4 *4]
void ff_put_pixels_clamped_mmi(const int16_t *block, uint8_t *av_restrict pixels, ptrdiff_t line_size)
#define DCT_8_INV_COL(A1, A2)
void ff_xvid_idct_mmi(int16_t *block)
#define DCT_8_INV_ROW_MMI(A1, A2, A3, A4)
/*
 * Emit a GNU-as assembler macro named "parse_r" that resolves a MIPS
 * register name (\r) to its numeric index, stored in \var. It starts
 * with \var = -1, probes each of the 32 registers with _IFC_REG(n),
 * and raises an assembler .error if no probe matched (\var still < 0).
 * NOTE(review): _IFC_REG is defined elsewhere (not visible in this
 * chunk) -- presumably it compares \r against register n's name(s) and
 * sets \var = n on a match; confirm its definition.
 */
__asm__(".macro parse_r var r\n\t"
        "\\var = -1\n\t"
        /* one probe per MIPS GPR, $0 .. $31 */
        _IFC_REG(0)  _IFC_REG(1)  _IFC_REG(2)  _IFC_REG(3)
        _IFC_REG(4)  _IFC_REG(5)  _IFC_REG(6)  _IFC_REG(7)
        _IFC_REG(8)  _IFC_REG(9)  _IFC_REG(10) _IFC_REG(11)
        _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
        _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
        _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
        _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
        _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
        /* no register matched: fail assembly with a clear message */
        ".iflt \\var\n\t"
        ".error \"Unable to parse register name \\r\"\n\t"
        ".endif\n\t"
        ".endm")