Go to the documentation of this file.
54 c->alf.filter[LUMA] = alf_filter_luma_8_neon;
55 c->alf.filter[CHROMA] = alf_filter_chroma_8_neon;
56 } else if (bd == 10) {
57 c->alf.filter[LUMA] = alf_filter_luma_10_neon;
58 c->alf.filter[CHROMA] = alf_filter_chroma_10_neon;
59 } else if (bd == 12) {
60 c->alf.filter[LUMA] = alf_filter_luma_12_neon;
61 c->alf.filter[CHROMA] = alf_filter_chroma_12_neon;
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
static atomic_int cpu_flags
#define FF_ARRAY_ELEMS(a)
void ff_vvc_sao_edge_filter_8x8_8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride_dst, const int16_t *sao_offset_val, int eo, int width, int height)
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c… [text truncated in source]
void ff_h26x_sao_band_filter_8x8_8_neon(uint8_t *_dst, const uint8_t *_src, ptrdiff_t stride_dst, ptrdiff_t stride_src, const int16_t *sao_offset_val, int sao_left_class, int width, int height)
#define i(width, name, range_min, range_max)
void ff_vvc_dsp_init_aarch64(VVCDSPContext *const c, const int bd)
void ff_vvc_sao_edge_filter_16x16_8_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride_dst, const int16_t *sao_offset_val, int eo, int width, int height)