36 static const uint8_t  NNEDI_XDIM[] = { 8, 16, 32, 48, 8, 16, 32 };
37 static const uint8_t  NNEDI_YDIM[] = { 6, 6, 6, 6, 4, 4, 4 };
38 static const uint16_t NNEDI_NNS[]  = { 16, 32, 64, 128, 256 };
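
These tables define every predictor geometry the filter can load: the nsize option selects an xdim x ydim input window, and the nns option selects the neuron count per weight set. A throwaway sketch (not filter code) enumerating the combinations:

    #include <stdio.h>
    #include <stdint.h>

    static const uint8_t  xdim[] = { 8, 16, 32, 48, 8, 16, 32 };
    static const uint8_t  ydim[] = { 6, 6, 6, 6, 4, 4, 4 };
    static const uint16_t nns[]  = { 16, 32, 64, 128, 256 };

    int main(void)
    {
        for (int s = 0; s < 7; s++)     /* nsize option, default 6 (see line 133) */
            printf("nsize=%d: %2dx%d window, %3d weights per neuron\n",
                   s, xdim[s], ydim[s], xdim[s] * ydim[s]);
        for (int n = 0; n < 5; n++)     /* nns option index */
            printf("nns=%d: %3d neurons per weight set\n", n, nns[n]);
        return 0;
    }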
105                  int src_stride, int dst_stride,
108                   int src_stride, int dst_stride,
111                          const void *src, ptrdiff_t src_stride,
116 #define OFFSET(x) offsetof(NNEDIContext, x)
117 #define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
118 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
124 {
"interlaced",
"only deinterlace frames marked as interlaced", 0,
AV_OPT_TYPE_CONST, {.i64=1}, 0, 0,
RFLAGS,
"deint" },
126 {
"af",
"use frame flags, both fields", 0,
AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0,
RFLAGS,
"field" },
127 {
"a",
"use frame flags, single field", 0,
AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0,
RFLAGS,
"field" },
130 {
"tf",
"use both fields, top first", 0,
AV_OPT_TYPE_CONST, {.i64=2}, 0, 0,
RFLAGS,
"field" },
131 {
"bf",
"use both fields, bottom first", 0,
AV_OPT_TYPE_CONST, {.i64=3}, 0, 0,
RFLAGS,
"field" },
133 {
"nsize",
"set size of local neighborhood around each pixel, used by the predictor neural network",
OFFSET(nsize),
AV_OPT_TYPE_INT, {.i64=6}, 0, 6,
RFLAGS,
"nsize" },
171     outlink->w = ctx->inputs[0]->w;
172     outlink->h = ctx->inputs[0]->h;
207                      int n, float scale, float bias)
211     sum = s->fdsp->scalarproduct_float(kernel, input, n);
213     y = sum * scale + bias + 1e-20f;
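
AVFloatDSPContext's scalarproduct_float() is a SIMD dot product; a scalar sketch of what dot_dsp() computes (the tiny 1e-20f term nudges the result away from exact zero before later divisions and exponentials):

    /* Scalar equivalent of dot_dsp(): a plain dot product, then the
     * model's scale and bias, plus a tiny constant to avoid exact zero. */
    static float dot_ref(const float *kernel, const float *input,
                         int n, float scale, float bias)
    {
        float sum = 0.0f;
        for (int i = 0; i < n; i++)
            sum += kernel[i] * input[i];
        return sum * scale + bias + 1e-20f;
    }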
220     return x / (1.0f + fabsf(x));
230                         const void *src, ptrdiff_t src_stride,
231                         uint8_t *prescreen, int N,
235     const float *src_p = src;
238     const float *window = src_p - 2 * src_stride - 5;
240     for (int j = 0; j < N; j++) {
244         for (int i = 0; i < 4; i++)
245             memcpy(input + i * 12, window + i * src_stride + j, 12 * sizeof(float));
248         for (int n = 0; n < 4; n++)
253         for (int n = 0; n < 4; n++)
258         for (int n = 0; n < 4; n++)
266                         const void *src, ptrdiff_t src_stride,
267                         uint8_t *prescreen, int N,
271     const float *src_p = src;
274     const float *window = src_p - 2 * src_stride - 6;
276     for (int j = 0; j < N; j += 4) {
280         for (int i = 0; i < 4; i++)
281             memcpy(input + i * 16, window + i * src_stride + j, 16 * sizeof(float));
283         for (int n = 0; n < 4; n++)
287         for (int n = 0; n < 4; n++)
290         for (int n = 0; n < 4; n++)
291             prescreen[j + n] = state[n + 4] > 0.f;
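
Both prescreeners emit one byte per output pixel. Judging from how predictor() and interpolation() below consume the mask, a nonzero byte marks a pixel the cheap cubic kernel may handle, and a zero byte sends the pixel to the full network. A hypothetical helper tallying that split:

    #include <stdint.h>

    /* Hypothetical helper: count how a prescreen row splits the work.
     * Nonzero bytes go to interpolation(), zero bytes to predictor(). */
    static void prescreen_stats(const uint8_t *prescreen, int width,
                                int *cubic, int *network)
    {
        *cubic = *network = 0;
        for (int x = 0; x < width; x++)
            *(prescreen[x] ? cubic : network) += 1;
    }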
297     return nn * model->nsize;
325                          float *buf, float mstd[4],
333     for (int i = 0; i < model->ydim; i++) {
334         memcpy(buf, src, model->xdim * sizeof(float));
336         for (int j = 0; j < model->xdim; j++) {
347     mstd[0] = sum * scale;
350     tmp = sum_sq * scale - mstd[0] * mstd[0];
351     if (tmp < FLT_EPSILON) {
355         mstd[1] = sqrtf(tmp);
356         mstd[2] = 1.0f / mstd[1];
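
The elided lines accumulate sum and sum_sq while copying the window into buf; mstd[] then carries the window mean, standard deviation, reciprocal standard deviation, and a zeroed accumulator for wae5(). A self-contained sketch of that statistics pass:

    #include <float.h>
    #include <math.h>

    /* One-pass mean/stddev over the gathered window, with the same
     * degenerate-variance guard as the lines above. */
    static void window_stats(const float *buf, int n, float mstd[4])
    {
        const float scale = 1.0f / n;
        float sum = 0.0f, sum_sq = 0.0f, tmp;

        for (int i = 0; i < n; i++) {
            sum    += buf[i];
            sum_sq += buf[i] * buf[i];
        }

        mstd[0] = sum * scale;                    /* mean                  */
        mstd[3] = 0.0f;                           /* prediction accumulator */
        tmp = sum_sq * scale - mstd[0] * mstd[0]; /* variance              */
        if (tmp < FLT_EPSILON) {
            mstd[1] = mstd[2] = 0.0f;
        } else {
            mstd[1] = sqrtf(tmp);                 /* stddev                */
            mstd[2] = 1.0f / mstd[1];             /* 1 / stddev            */
        }
    }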
371 static void wae5(const float *softmax, const float *el,
372                  int n, float mstd[4])
374     float vsum = 0.0f, wsum = 0.0f;
376     for (int i = 0; i < n; i++) {
382         mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
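
The loop body between lines 376 and 382 is elided. From the surrounding definitions it forms a weighted average: each softmax activation (already exponentiated by transform_softmax_exp() before this call) weights the elliott-squashed counterpart, and a tiny floor on wsum guards the division before line 382 maps the average back through the window std and mean. A sketch under those assumptions; the floor value here is illustrative:

    #include <math.h>

    static float elliott_ref(float x) { return x / (1.0f + fabsf(x)); }

    static void wae5_ref(const float *softmax, const float *el,
                         int n, float mstd[4])
    {
        float vsum = 0.0f, wsum = 0.0f;

        for (int i = 0; i < n; i++) {
            vsum += softmax[i] * elliott_ref(el[i]);
            wsum += softmax[i];
        }

        if (wsum > 1e-10f)                 /* illustrative floor */
            mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
        else
            mstd[3] += mstd[0];            /* fall back to the window mean */
    }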
388                       const void *src, ptrdiff_t src_stride, void *dst,
389                       const uint8_t *prescreen, int N,
393     const float *src_p = src;
397     const float *window = src_p - (model->ydim / 2) * src_stride - (model->xdim / 2 - 1);
398     const int filter_size = model->nsize;
399     const int nns = model->nns;
401     for (int i = 0; i < N; i++) {
403         float activation[256 * 2];
413         for (int nn = 0; nn < nns; nn++)
416         for (int nn = 0; nn < nns; nn++)
420         wae5(activation, activation + nns, nns, mstd);
423         for (int nn = 0; nn < nns; nn++)
426         for (int nn = 0; nn < nns; nn++)
430         wae5(activation, activation + nns, nns, mstd);
433         dst_p[i] = mstd[3] * (use_q2 ? 0.5f : 1.f);
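
The 0.5f on line 433 exists because with qual=2 both weight sets run and both wae5() calls accumulate into mstd[3], so the sum must be halved to average the two predictions. A hypothetical standalone restatement:

    /* Illustration of the qual=2 averaging on line 433. */
    static float combine_passes(float q1_pred, float q2_pred, int use_q2)
    {
        return use_q2 ? 0.5f * (q1_pred + q2_pred) : q1_pred;
    }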
438                        int src_stride, int dst_stride,
441     for (int y = 0; y < height; y++) {
442         for (int x = 0; x < 32; x++)
443             dst[-x - 1] = src[x];
445         for (int x = 0; x < width; x++)
448         for (int x = 0; x < 32; x++)
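
read_bytes() converts a row to float while writing 32 mirrored samples on each side (the dst[-x - 1] store reflects the left edge), which is why filter_slice() below uses srcbuf_stride = width + 64. A sketch of the padding scheme; the caller must point dst at least 32 floats into its buffer:

    /* Mirror-pad one float row: reflect 32 samples at each edge so the
     * widest network window never reads outside the image. */
    static void mirror_pad_row(const float *src, float *dst, int width)
    {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];                 /* left edge, reflected  */
        for (int x = 0; x < width; x++)
            dst[x] = src[x];                      /* row body              */
        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1];  /* right edge, reflected */
    }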
457                        int src_stride, int dst_stride,
460     const uint16_t *src = (const uint16_t *)srcp;
464     for (int y = 0; y < height; y++) {
465         for (int x = 0; x < 32; x++)
468         for (int x = 0; x < width; x++)
471         for (int x = 0; x < 32; x++)
480                         int src_stride, int dst_stride,
484     for (int y = 0; y < height; y++) {
485         for (int x = 0; x < width; x++)
494                         int src_stride, int dst_stride,
498     uint16_t *dst = (uint16_t *)dstp;
502     for (int y = 0; y < height; y++) {
503         for (int x = 0; x < width; x++)
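
write_words() is the >8-bit inverse of read_words(): scale the network output back up and clip to the plane's bit depth (the filter clamps with av_clip_uintp2_c(), listed among the referenced symbols). A freestanding sketch with the clamp written out:

    #include <stdint.h>

    /* Convert one network output back to a depth-bit sample. */
    static uint16_t store_word(float v, int depth, float out_scale)
    {
        int x   = (int)(v * out_scale);
        int max = (1 << depth) - 1;
        return (uint16_t)(x < 0 ? 0 : (x > max ? max : x));
    }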
512                           void *dst, const uint8_t *prescreen, int n)
514     const float *src_p = src;
516     const float *window = src_p - 2 * src_stride;
518     for (int i = 0; i < n; i++) {
524         accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
525         accum += (19.0f / 32.0f) * window[1 * src_stride + i];
526         accum += (19.0f / 32.0f) * window[2 * src_stride + i];
527         accum += (-3.0f / 32.0f) * window[3 * src_stride + i];
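
The four taps (-3, 19, 19, -3)/32 are a cubic interpolation kernel for a sample halfway between window rows 1 and 2; they sum to 32/32 = 1, so flat areas pass through unchanged. A sketch applied to a single column:

    #include <stddef.h>

    /* 4-tap cubic interpolation of the missing line from the two field
     * lines above and below it (window rows 0..3). */
    static float cubic4(const float *col, ptrdiff_t stride)
    {
        return (-3.0f / 32.0f) * col[0 * stride]
             + (19.0f / 32.0f) * col[1 * stride]
             + (19.0f / 32.0f) * col[2 * stride]
             + (-3.0f / 32.0f) * col[3 * stride];
    }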
538     const float in_scale = s->in_scale;
539     const float out_scale = s->out_scale;
540     const int depth = s->depth;
546     for (int p = 0; p < s->nb_planes; p++) {
547         const int height = s->planeheight[p];
548         const int width = s->planewidth[p];
549         const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
551         const uint8_t *src_data = in->data[p];
552         uint8_t *dst_data = out->data[p];
553         uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
554         const int src_linesize = in->linesize[p];
555         const int dst_linesize = out->linesize[p];
556         uint8_t *prescreen_buf = s->prescreen_buf[jobnr];
557         float *srcbuf = s->input_buf[jobnr];
558         const int srcbuf_stride = width + 64;
559         float *dstbuf = s->output_buf[jobnr];
560         const int dstbuf_stride = width;
561         const int slice_height = (slice_end - slice_start) / 2;
563         const uint8_t *in_line;
567         if (!(s->process_plane & (1 << p))) {
575         y_out = slice_start + (tff ^ (slice_start & 1));
576         in_line = src_data + (y_out * src_linesize);
577         out_line = dst_data + (y_out * dst_linesize);
580             memcpy(out_line, in_line, s->linesize[p]);
582             in_line += src_linesize * 2;
583             out_line += dst_linesize * 2;
586         y_out = slice_start + ((!tff) ^ (slice_start & 1));
588         s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize,
590                 src_linesize * 2, srcbuf_stride,
592         srcbuf += srcbuf_stride;
594         s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize,
596                 src_linesize * 2, srcbuf_stride,
598         srcbuf += srcbuf_stride;
600         s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize,
602                 src_linesize * 2, srcbuf_stride,
604         srcbuf += srcbuf_stride;
606         in_line = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
607         out_line = dst_data + (y_out * dst_linesize);
609         s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
610                 width, slice_height - last_slice, in_scale);
612         y_out += (slice_height - last_slice) * 2;
614         s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
615                 srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
616                 src_linesize * 2, srcbuf_stride,
619         s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
620                 srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
621                 src_linesize * 2, srcbuf_stride,
624         s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
625                 srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
626                 src_linesize * 2, srcbuf_stride,
629         for (int y = 0; y < slice_end - slice_start; y += 2) {
631             s->prescreen[s->pscrn > 1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
632                                        srcbuf_stride, prescreen_buf, width,
633                                        &s->prescreener[s->pscrn - 1]);
636                          srcbuf + (y / 2) * srcbuf_stride + 32,
638                          dstbuf + (y / 2) * dstbuf_stride,
639                          prescreen_buf, width,
640                          &s->coeffs[s->etype][s->nnsparam][s->nsize], s->qual == 2);
645                           dstbuf + (y / 2) * dstbuf_stride,
646                           prescreen_buf, width);
649         s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
650                  width, slice_height, depth, out_scale);
670                       FFMIN(s->planeheight[1] / 2, s->nb_threads));
672     if (s->field == -2 || s->field > 1)
673         s->field_n = !s->field_n;
689     if ((s->deint && !s->prev->interlaced_frame) || ctx->is_disabled) {
696         s->pts = s->prev->pts * 2;
698     if (ret < 0 || (s->field > -2 && s->field < 2)) {
704     s->pts = s->prev->pts + in->pts;
729                               ctx->outputs[0]->time_base);
733     } else if (ret < 0) {
742     memcpy(dst, *data, n * sizeof(float));
757 int filter_size = nns * xdim * ydim;
761     data = av_calloc(filter_size + bias_size, 4 * sizeof(float));
768     coeffs->nsize = xdim * ydim;
789     copy_weights(&s->prescreener[0].kernel_l0[0][0], 4 * 48, &bdata);
792     copy_weights(&s->prescreener[0].kernel_l1[0][0], 4 * 4, &bdata);
795     copy_weights(&s->prescreener[0].kernel_l2[0][0], 4 * 8, &bdata);
798     for (int i = 0; i < 3; i++) {
800         float kernel_l0_shuffled[4 * 64];
801         float kernel_l1_shuffled[4 * 4];
809         for (int n = 0; n < 4; n++) {
810             for (int k = 0; k < 64; k++)
811                 data->kernel_l0[n][k] = kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
812             for (int k = 0; k < 4; k++)
813                 data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
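
The index math on lines 811 and 813 de-interleaves the on-disk layout: kernel_l0 is stored as blocks of 8 coefficients interleaved across the 4 neurons, kernel_l1 as a 4x4 matrix in the transposed order. A throwaway program printing the l0 mapping for inspection:

    #include <stdio.h>

    int main(void)
    {
        /* file offset feeding kernel_l0[n][k], per line 811 */
        for (int n = 0; n < 4; n++)
            for (int k = 0; k < 64; k += 8)
                printf("neuron %d, coeffs %2d..%2d <- file %3d..%3d\n",
                       n, k, k + 7,
                       (k / 8) * 32 + n * 8,
                       (k / 8) * 32 + n * 8 + 7);
        return 0;
    }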
817     for (int m = 0; m < 2; m++) {
819         for (int i = 0; i < 5; i++) {
823             for (int j = 0; j < 7; j++) {
827 const int filter_size = xdim * ydim;
873     for (int n = 0; n < 4; n++) {
882     for (int n = 0; n < 4; n++) {
891     const int filter_size = model->nsize;
892     const int nns = model->nns;
893     const float scale = 1.f / nns;
895     double softmax_means[256];
896     double elliott_means[256];
897     double mean_filter[48 * 6] = { 0 };
901     for (int nn = 0; nn < nns; nn++) {
902         softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
903         elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);
905         for (int k = 0; k < filter_size; k++)
906             mean_filter[k] += model->softmax_q1[nn * filter_size + k] - softmax_means[nn];
909     for (int k = 0; k < filter_size; k++)
910         mean_filter[k] *= scale;
914     for (int nn = 0; nn < nns; nn++) {
915         for (int k = 0; k < filter_size; k++) {
916             model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
917             model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];
923     memset(mean_filter, 0, sizeof(mean_filter));
925     for (int nn = 0; nn < nns; nn++) {
926         softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
927         elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);
929         for (int k = 0; k < filter_size; k++) {
930             mean_filter[k] += model->softmax_q2[nn * filter_size + k] - softmax_means[nn];
934     for (int k = 0; k < filter_size; k++)
935         mean_filter[k] *= scale;
939     for (int nn = 0; nn < nns; nn++) {
940         for (int k = 0; k < filter_size; k++) {
941             model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
942             model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
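
A condensed sketch of what these loops do for one weight set (the code above repeats it for q2): every neuron's filters lose their per-neuron mean, and the softmax filters additionally lose the cross-neuron average filter. The helper name here is illustrative:

    /* Normalize one weight set the way subtract_mean_predictor() does. */
    static void subtract_means(float *softmax, float *ell,
                               int nns, int filter_size)
    {
        double mean_filter[48 * 6] = { 0 };   /* 48x6 is the largest window */

        for (int nn = 0; nn < nns; nn++) {
            double sm = 0.0, em = 0.0;
            for (int k = 0; k < filter_size; k++) {
                sm += softmax[nn * filter_size + k];
                em += ell[nn * filter_size + k];
            }
            sm /= filter_size;
            em /= filter_size;
            for (int k = 0; k < filter_size; k++) {
                mean_filter[k] += softmax[nn * filter_size + k] - sm;
                softmax[nn * filter_size + k] -= (float)sm;
                ell[nn * filter_size + k]     -= (float)em;
            }
        }
        /* subtract the cross-neuron average filter from every softmax row */
        for (int nn = 0; nn < nns; nn++)
            for (int k = 0; k < filter_size; k++)
                softmax[nn * filter_size + k] -= (float)(mean_filter[k] / nns);
    }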
952     FILE *weights_file = NULL;
953 int64_t weights_size;
964 if (fseek(weights_file, 0, SEEK_END)) {
966 fclose(weights_file);
970 weights_size = ftell(weights_file);
972 if (weights_size == -1) {
973 fclose(weights_file);
977 fclose(weights_file);
982 if (fseek(weights_file, 0, SEEK_SET)) {
983 fclose(weights_file);
990 fclose(weights_file);
996 fclose(weights_file);
1002 fclose(weights_file);
1026    s->depth = desc->comp[0].depth;
1033    s->planewidth[0] = s->planewidth[3] = inlink->w;
1035    s->planeheight[0] = s->planeheight[3] = inlink->h;
1037    s->half = ((1 << 8) - 1) / 2.f;
1038    s->out_scale = 1 << (s->depth - 8);
1039    s->in_scale = 1.f / s->out_scale;
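
The network always works in the 8-bit numeric range (note s->half = 127.5 above): read_words() multiplies depth-bit samples by in_scale and write_words() scales back by out_scale. A tiny worked example of the round trip:

    #include <stdio.h>

    int main(void)
    {
        int depth = 10, sample = 512;
        float out_scale = 1 << (depth - 8);   /* 4.0f  */
        float in_scale  = 1.0f / out_scale;   /* 0.25f */
        float net = sample * in_scale;        /* 128.0f, 8-bit range */
        printf("in=%d net=%.2f out=%d\n", sample, net, (int)(net * out_scale));
        return 0;
    }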
1060    for (int i = 0; i < 2; i++) {
1061        for (int j = 0; j < 5; j++) {
1062            for (int k = 0; k < 7; k++)
1067    s->input_size = (s->planewidth[0] + 64) * (s->planeheight[0] + 6);
1068    s->input_buf = av_calloc(s->nb_threads, sizeof(*s->input_buf));
1072    for (int i = 0; i < s->nb_threads; i++) {
1073        s->input_buf[i] = av_calloc(s->input_size, sizeof(**s->input_buf));
1074        if (!s->input_buf[i])
1078    s->output_buf = av_calloc(s->nb_threads, sizeof(*s->output_buf));
1082    for (int i = 0; i < s->nb_threads; i++) {
1083        s->output_buf[i] = av_calloc(s->input_size, sizeof(**s->output_buf));
1084        if (!s->output_buf[i])
1088    s->prescreen_buf = av_calloc(s->nb_threads, sizeof(*s->prescreen_buf));
1089    if (!s->prescreen_buf)
1092    for (int i = 0; i < s->nb_threads; i++) {
1093        s->prescreen_buf[i] = av_calloc(s->planewidth[0], sizeof(**s->prescreen_buf));
1094        if (!s->prescreen_buf[i])
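
input_size on line 1067 is the padded plane in floats: 64 extra columns for the two 32-sample mirrored margins and 6 extra rows for the tallest window's vertical context; every worker thread gets its own input, output, and prescreen buffer. A sketch of the per-thread footprint:

    #include <stddef.h>

    /* Per-thread float count for the padded input plane, per line 1067. */
    static size_t input_buf_floats(int width, int height)
    {
        return (size_t)(width + 64) * (height + 6);
    }

For a 1920x1080 luma plane this is 1984 * 1086, roughly 2.15 million floats (about 8.6 MB), allocated once for the input buffer and once more for the output buffer of each thread.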
1105    for (int i = 0; i < s->nb_threads && s->prescreen_buf; i++)
1110    for (int i = 0; i < s->nb_threads && s->input_buf; i++)
1115    for (int i = 0; i < s->nb_threads && s->output_buf; i++)
1121    for (int i = 0; i < 2; i++) {
1122        for (int j = 0; j < 5; j++) {
1123            for (int k = 0; k < 7; k++) {
1152    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
1154    .priv_class    = &nnedi_class,