static const uint16_t NNEDI_NNS[] = { 16, 32, 64, 128, 256 };
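/* Note (inference from the table name, not stated in this excerpt): this
 * table is indexed by the filter's "nns" option value 0..4, choosing how
 * many neurons the predictor network uses per pass. */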
int xdim, ydim, nns, nsize;
/* NNEDIContext callbacks, selected per bit depth in config_input() */
void (*read)(const uint8_t *src, float *dst,
             int src_stride, int dst_stride,
             int width, int height, float scale);
void (*write)(const float *src, uint8_t *dst,
              int src_stride, int dst_stride,
              int width, int height, int depth, float scale);
void (*prescreen[2])(AVFilterContext *ctx,
                     const void *src, ptrdiff_t src_stride,
                     uint8_t *prescreen, int N, void *data);
#define OFFSET(x) offsetof(NNEDIContext, x)
#define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS  AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

{ "interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, "deint" },
{ "af", "use frame flags, both fields",  0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, RFLAGS, "field" },
{ "a",  "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, RFLAGS, "field" },
{ "tf", "use both fields, top first",    0, AV_OPT_TYPE_CONST, {.i64=2},  0, 0, RFLAGS, "field" },
{ "bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3},  0, 0, RFLAGS, "field" },
{ "nsize", "set size of local neighborhood around each pixel, used by the predictor neural network",
           OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, RFLAGS, "nsize" },
static float dot_dsp(NNEDIContext *s, const float *kernel, const float *input,
                     int n, float scale, float bias)
{
    /* s->fdsp: the filter's AVFloatDSPContext (member name presumed) */
    float sum = s->fdsp->scalarproduct_float(kernel, input, n);

    return sum * scale + bias;
}
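/*
 * For reference, a scalar sketch of what the scalarproduct_float callback
 * used by dot_dsp() computes (the real AVFloatDSPContext routine is SIMD
 * accelerated and has alignment/length-multiple requirements):
 */
static float scalarproduct_float_ref(const float *v1, const float *v2, int len)
{
    float sum = 0.0f;

    for (int i = 0; i < len; i++)
        sum += v1[i] * v2[i];
    return sum;
}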
static float elliott(float x)
{
    return x / (1.0f + fabsf(x));
}
static void process_old(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N, void *data)
{
    /* ... */
    const float *src_p = src;

    /* window starts 2 rows and 5 columns before the current sample */
    const float *window = src_p - 2 * src_stride - 5;

    for (int j = 0; j < N; j++) {
        /* ... */
        for (int i = 0; i < 4; i++)
            memcpy(input + i * 12, window + i * src_stride + j,
                   12 * sizeof(float));

        for (int n = 0; n < 4; n++)
            /* ... layer 0 dot products ... */;
        for (int n = 0; n < 4; n++)
            /* ... layer 1 dot products ... */;
        for (int n = 0; n < 4; n++)
            /* ... layer 2 dot products ... */;

        prescreen[j] = FFMAX(state[10], state[11]) <=
                       FFMAX(state[8], state[9]) ? 255 : 0;
    }
}
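/*
 * The decision rule above, isolated (a sketch; the original uses FFMAX):
 * the last layer emits two score pairs, and the pixel is flagged with 255
 * unless the second pair dominates. Reading the flag as "the cheap 4-tap
 * interpolator is sufficient" is an inference from how prescreen_buf is
 * consumed later in filter_slice().
 */
#include <math.h>

static unsigned char old_prescreen_decision(const float state[12])
{
    return fmaxf(state[10], state[11]) <= fmaxf(state[8], state[9]) ? 255 : 0;
}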
static void process_new(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N, void *data)
{
    /* ... */
    const float *src_p = src;

    /* window starts 2 rows and 6 columns before the current sample */
    const float *window = src_p - 2 * src_stride - 6;

    for (int j = 0; j < N; j += 4) { /* four output pixels per iteration */
        /* ... */
        for (int i = 0; i < 4; i++)
            memcpy(input + i * 16, window + i * src_stride + j,
                   16 * sizeof(float));

        for (int n = 0; n < 4; n++)
            /* ... layer 0 ... */;
        for (int n = 0; n < 4; n++)
            /* ... layer 1 ... */;
        for (int n = 0; n < 4; n++)
            prescreen[j + n] = state[n + 4] > 0.f;
    }
}
static int filter_offset(int nn, const PredictorCoefficients *const model)
{
    return nn * model->nsize;
}
static void gather_input(const float *src, ptrdiff_t src_stride,
                         float *buf, float mstd[4],
                         const PredictorCoefficients *const model)
{
    /* ... */
    for (int i = 0; i < model->ydim; i++) {
        memcpy(buf, src, model->xdim * sizeof(float));

        for (int j = 0; j < model->xdim; j++) {
            const float val = src[j];
            /* ... accumulate val into sum and sum_sq ... */
        }
        /* ... */
    }

    mstd[0] = sum / model->nsize;        /* neighborhood mean */
    /* ... */
    tmp = sum_sq / model->nsize - mstd[0] * mstd[0];
    if (tmp < FLT_EPSILON) {
        /* ... flat window: deviation and reciprocal are zeroed ... */
    } else {
        mstd[1] = sqrtf(tmp);            /* standard deviation */
        mstd[2] = 1.0f / mstd[1];        /* reciprocal, used as scale */
    }
    /* ... */
static float softmax_exp(float x)
{
    /* the clip keeps expf() finite for any filter response */
    return expf(av_clipf(x, -80.f, 80.f));
}
static void wae5(const float *softmax, const float *el,
                 int n, float mstd[4])
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        /* ... */
    }
    /* ... */
    mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
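/*
 * Assumed body of the elided loop above (a sketch, not verbatim source):
 * each softmax weight scales its elliott-activated partner, and the
 * normalized average is mapped back through the local deviation and mean
 * kept in mstd[1] and mstd[0]. The guard value is an assumption.
 */
#include <math.h>

static void wae5_sketch(const float *softmax, const float *el,
                        int n, float mstd[4])
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        vsum += softmax[i] * (el[i] / (1.0f + fabsf(el[i])));
        wsum += softmax[i];
    }

    if (wsum > 1e-10f)
        mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
    else
        mstd[3] += mstd[0];
}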
static void predictor(AVFilterContext *ctx,
                      const void *src, ptrdiff_t src_stride, void *dst,
                      const uint8_t *prescreen, int N,
                      void *data, int use_q2)
{
    /* ... */
    const float *src_p = src;
    /* ... */
    const float *window = src_p - (model->ydim / 2) * src_stride
                                - (model->xdim / 2 - 1);
    int filter_size = model->nsize;
    int nns = model->nns;

    for (int i = 0; i < N; i++) {
        float activation[256 * 2];
        /* ... pixels already settled by the prescreener are skipped ... */

        for (int nn = 0; nn < nns; nn++)
            /* ... first-pass softmax neurons ... */;
        for (int nn = 0; nn < nns; nn++)
            /* ... first-pass elliott neurons ... */;

        wae5(activation, activation + nns, nns, mstd);

        if (use_q2) { /* second quality pass (structure presumed) */
            for (int nn = 0; nn < nns; nn++)
                /* ... */;
            for (int nn = 0; nn < nns; nn++)
                /* ... */;

            wae5(activation, activation + nns, nns, mstd);
        }

        /* both passes accumulate into mstd[3]; average when q2 also ran */
        dst_p[i] = mstd[3] / (use_q2 ? 2 : 1);
    }
static void read_bytes(const uint8_t *src, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];

        for (int x = 0; x < width; x++)
            dst[x] = src[x];

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1];
        /* ... advance src and dst by their strides ... */
    }
}
static void read_words(const uint8_t *srcp, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    const uint16_t *src = (const uint16_t *)srcp;
    /* ... */
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x] * scale;

        for (int x = 0; x < width; x++)
            dst[x] = src[x] * scale;

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1] * scale;
        /* ... */
    }
}
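/*
 * Both read_* helpers pad every row with 32 mirrored samples on each side so
 * the sliding prescreener/predictor windows never read out of bounds. A
 * condensed single-row illustration (dst must point at least 32 floats into
 * its buffer):
 */
static void mirror_pad_row(const float *src, float *dst, int width)
{
    for (int x = 0; x < 32; x++)
        dst[-x - 1] = src[x];                /* left: reflect leading samples */
    for (int x = 0; x < width; x++)
        dst[x] = src[x];                     /* payload */
    for (int x = 0; x < 32; x++)
        dst[width + x] = src[width - x - 1]; /* right: reflect trailing samples */
}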
static void write_bytes(const float *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, int depth, float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uint8(src[x]);
        /* ... */
    }
}
static void write_words(const float *src, uint8_t *dstp,
                        int src_stride, int dst_stride,
                        int width, int height, int depth, float scale)
{
    uint16_t *dst = (uint16_t *)dstp;
    /* ... */
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            /* ... scale and clip each sample to the target bit depth ... */
        }
        /* ... */
    }
}
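/*
 * A plausible shape for the elided store above (an assumption, not verbatim
 * source): scale back to integer range, round, and clip to the plane's bit
 * depth, much like av_clip_uintp2_c() does in FFmpeg.
 */
#include <math.h>
#include <stdint.h>

static uint16_t scale_round_clip(float v, float scale, int depth)
{
    long x = lrintf(v * scale);
    long max = (1L << depth) - 1;

    return (uint16_t)(x < 0 ? 0 : x > max ? max : x);
}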
static void interpolation(const void *src, ptrdiff_t src_stride,
                          void *dst, const uint8_t *prescreen, int n)
{
    const float *src_p = src;
    /* ... */
    const float *window = src_p - 2 * src_stride;

    for (int i = 0; i < n; i++) {
        float accum = 0.0f;
        /* ... presumably only pixels flagged by the prescreener are
           interpolated here ... */
        accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
        accum += (19.0f / 32.0f) * window[1 * src_stride + i];
        accum += (19.0f / 32.0f) * window[2 * src_stride + i];
        accum += (-3.0f / 32.0f) * window[3 * src_stride + i];
        /* ... */
    }
}
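/*
 * Sanity check (illustration, not in the original): the four taps
 * (-3, 19, 19, -3) / 32 sum to exactly 1, so this fallback interpolator
 * reproduces constant areas and only reshapes local variation.
 */
static float interp4_example(const float p[4])
{
    return (-3.0f * p[0] + 19.0f * p[1] + 19.0f * p[2] - 3.0f * p[3]) / 32.0f;
}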
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    /* ... */
    const int depth = s->depth;
    /* ... */
    /* even slice bounds: each job owns whole pairs of output lines */
    const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
    const int slice_end   = 2 * ((height / 2 * (jobnr + 1)) / nb_jobs);
    /* ... */
    const int src_linesize = in->linesize[p];
    const int dst_linesize = out->linesize[p];
    /* ... */
    const int srcbuf_stride = width + 64; /* 32 mirrored pixels per side */
    /* ... */
    const int dstbuf_stride = width;
    const int slice_height  = (slice_end - slice_start) / 2;
    const int last_slice    = slice_end == height;
    /* ... untouched planes are presumably copied through:
       av_image_copy_plane(..., s->linesize[p], slice_end - slice_start); */

    /* pass the field that is kept through unchanged */
    y_out    = slice_start + (tff ^ (slice_start & 1));
    in_line  = src_data + (y_out * src_linesize);
    out_line = dst_data + (y_out * dst_linesize);

    while (y_out < slice_end) {
        memcpy(out_line, in_line, s->linesize[p]);
        /* ... */
        in_line  += src_linesize * 2;
        out_line += dst_linesize * 2;
    }

    /* gather the source field into the float work buffer,
       mirroring at the frame borders */
    y_out = slice_start + ((!tff) ^ (slice_start & 1));

    s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize, /* ... */
            src_linesize * 2, srcbuf_stride, /* ... */);
    srcbuf += srcbuf_stride;

    s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize, /* ... */
            src_linesize * 2, srcbuf_stride, /* ... */);
    srcbuf += srcbuf_stride;

    s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize, /* ... */
            src_linesize * 2, srcbuf_stride, /* ... */);
    srcbuf += srcbuf_stride;

    in_line  = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
    out_line = dst_data + (y_out * dst_linesize);

    s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
            width, slice_height - last_slice, in_scale);

    y_out += (slice_height - last_slice) * 2;

    s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
            srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
            src_linesize * 2, srcbuf_stride, /* ... */);

    s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
            srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
            src_linesize * 2, srcbuf_stride, /* ... */);

    s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
            srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
            src_linesize * 2, srcbuf_stride, /* ... */);

    /* reconstruct the missing field line by line */
    for (int y = 0; y < slice_end - slice_start; y += 2) {
        if (/* ... new prescreener selected ... */) {
            s->prescreen[1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                            srcbuf_stride, prescreen_buf, width,
                            /* ... */);
        } else if (s->pscrn == 1) {
            s->prescreen[0](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                            srcbuf_stride, prescreen_buf, width,
                            /* ... */);
        }

        predictor(/* ... */,
                  srcbuf + (y / 2) * srcbuf_stride + 32,
                  /* ... */,
                  dstbuf + (y / 2) * dstbuf_stride,
                  prescreen_buf, width,
                  /* ... */);

        interpolation(/* ... */,
                      dstbuf + (y / 2) * dstbuf_stride,
                      prescreen_buf, width);
    }

    s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
             width, slice_height, depth, out_scale);
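/*
 * The slice partitioning above, isolated: bounds are computed in half-height
 * units and doubled, so every job starts and ends on an even line and owns
 * whole pairs of output lines (one kept line plus one interpolated line).
 */
static void slice_bounds(int height, int jobnr, int nb_jobs,
                         int *start, int *end)
{
    *start = 2 * ((height / 2 *  jobnr)      / nb_jobs);
    *end   = 2 * ((height / 2 * (jobnr + 1)) / nb_jobs);
}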
} else if (s->field > 1 || /* ... */) {
} else if (ret < 0) {
static void copy_weights(float *dst, int n, const float **data)
{
    memcpy(dst, *data, n * sizeof(float));
    *data += n; /* advance the shared read cursor */
}
int filter_size = nns * xdim * ydim;
/* ... */
coeffs->nsize = xdim * ydim;
for (int i = 0; i < 3; i++) {
    /* ... */
    float kernel_l0_shuffled[4 * 64];
    float kernel_l1_shuffled[4 * 4];
    /* ... */

    for (int n = 0; n < 4; n++) {
        for (int k = 0; k < 64; k++)
            data->kernel_l0[n][k] =
                kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
        for (int k = 0; k < 4; k++)
            data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
    }
}
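/* Reading of the gather above (inferred from the indexing, not stated in the
 * excerpt): the stored layer-0 weights interleave the four neurons in blocks
 * of 8 floats, so neuron n's k-th weight sits at (k/8)*32 + n*8 + (k%8);
 * the layer-1 weights are stored input-major and transposed here. */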
for (int m = 0; m < 2; m++) {          /* two predictor etypes */
    /* ... */
    for (int i = 0; i < 5; i++) {      /* five nns sizes */
        /* ... */
        for (int j = 0; j < 7; j++) {  /* seven neighborhood sizes */
            /* ... */
            int filter_size = xdim * ydim;
static void transform(float *input, int size, float mean, float half)
{
    for (int i = 0; i < size; i++)
        input[i] = (input[i] - mean) / half;
}
/* subtract_mean_old() / subtract_mean_new(): each of the four layer-0
   neurons has its mean removed via transform() (attribution inferred from
   the declarations elsewhere in this file) */
for (int n = 0; n < 4; n++) {
    /* ... */
}
/* ... */
for (int n = 0; n < 4; n++) {
    /* ... */
}
static void subtract_mean_predictor(PredictorCoefficients *model)
{
    int filter_size = model->nsize;
    int nns = model->nns;
    /* ... */
    float softmax_means[256];
    float elliott_means[256];
    float mean_filter[48 * 6];
    /* ... */

    /* first quality pass */
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++)
            mean_filter[k] += model->softmax_q1[nn * filter_size + k] -
                              softmax_means[nn];
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] /= nns;
    /* ... */
    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] +
                                                       mean_filter[k];
            model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];
        }
    }
    /* ... */

    /* second quality pass */
    memset(mean_filter, 0, 48 * 6 * sizeof(float));

    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++) {
            mean_filter[k] += model->softmax_q2[nn * filter_size + k] -
                              softmax_means[nn];
        }
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] /= nns;
    /* ... */
    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] +
                                                       mean_filter[k];
            model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
        }
    }
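/*
 * mean() as used throughout subtract_mean_predictor(), reconstructed as a
 * minimal sketch (the declaration matches this file; the body is presumed
 * to be the plain arithmetic mean):
 */
static float mean(const float *input, int size)
{
    float sum = 0.0f;

    for (int i = 0; i < size; i++)
        sum += input[i];
    return sum / size;
}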
FILE *weights_file = NULL;
int64_t weights_size;
/* ... */
if (!weights_file) {
    /* ... report and bail out ... */
}

if (fseek(weights_file, 0, SEEK_END)) {
    /* ... */
    fclose(weights_file);
}

weights_size = ftell(weights_file);

if (weights_size == -1) {
    fclose(weights_file);
    /* ... */
}
/* ... a size mismatch also closes the file ... */
fclose(weights_file);

if (fseek(weights_file, 0, SEEK_SET)) {
    fclose(weights_file);
}

/* ... the remaining failure paths and the success path close it too ... */
fclose(weights_file);
/* ... */
fclose(weights_file);
/* ... */
fclose(weights_file);
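/*
 * The loading protocol above, condensed into a standalone sketch (the
 * per-error av_log() messages are elided in this excerpt): probe the size
 * with fseek()/ftell(), rewind, read the whole blob, and close the file on
 * every exit path.
 */
#include <stdio.h>
#include <stdlib.h>

static void *load_blob(FILE *f, long expected_size)
{
    void *buf = NULL;

    if (fseek(f, 0, SEEK_END) || ftell(f) != expected_size ||
        fseek(f, 0, SEEK_SET))
        return NULL;

    buf = malloc(expected_size);
    if (buf && fread(buf, 1, expected_size, f) != (size_t)expected_size) {
        free(buf);
        buf = NULL;
    }
    return buf;
}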
s->half = ((1 << 8) - 1) / 2.f; /* 127.5: midpoint of the 8-bit range */
for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 5; j++) {
        for (int k = 0; k < 7; k++)
            /* ... */
for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 5; j++) {
        for (int k = 0; k < 7; k++) {
            /* ... */
.description = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
/* ... */
.priv_class = &nnedi_class,