FFmpeg: libavfilter/vf_yadif_cuda.c
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/cuda_check.h"
#include "internal.h"
#include "yadif.h"

#include "cuda/load_helper.h"

extern const unsigned char ff_vf_yadif_cuda_ptx_data[];
extern const unsigned int ff_vf_yadif_cuda_ptx_len;

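/*
 * Private filter context: the generic YADIF state is embedded first so the
 * shared yadif_common code can operate on it, followed by the CUDA device
 * and frame-pool references and the kernels loaded from the PTX module.
 */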
typedef struct DeintCUDAContext {
    YADIFContext yadif;

    AVCUDADeviceContext *hwctx;
    AVBufferRef         *device_ref;
    AVBufferRef         *input_frames_ref;
    AVHWFramesContext   *input_frames;

    CUmodule    cu_module;
    CUfunction  cu_func_uchar;
    CUfunction  cu_func_uchar2;
    CUfunction  cu_func_ushort;
    CUfunction  cu_func_ushort2;
} DeintCUDAContext;

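/*
 * Kernel launch geometry: each CUDA block covers BLOCKX x BLOCKY output
 * pixels, and DIV_UP rounds the grid dimensions up so the whole plane is
 * covered even when its size is not a multiple of the block size.
 */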
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define BLOCKX 32
#define BLOCKY 16

#define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, s->hwctx->internal->cuda_dl, x)

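/*
 * Wrap the prev/cur/next source planes in CUDA texture objects and launch
 * the selected deinterlacing kernel for one plane on the device stream.
 * The texture objects are destroyed again before returning.
 */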
static CUresult call_kernel(AVFilterContext *ctx, CUfunction func,
                            CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next,
                            CUarray_format format, int channels,
                            int src_width,  // Width is pixels per channel
                            int src_height, // Height is pixels per channel
                            int src_pitch,  // Pitch is bytes
                            CUdeviceptr dst,
                            int dst_width,  // Width is pixels per channel
                            int dst_height, // Height is pixels per channel
                            int dst_pitch,  // Pitch is pixels per channel
                            int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0;
    int ret;
    int skip_spatial_check = s->yadif.mode & 2;

    void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next,
                     &dst_width, &dst_height, &dst_pitch,
                     &src_width, &src_height, &parity, &tff,
                     &skip_spatial_check };

    CUDA_TEXTURE_DESC tex_desc = {
        .filterMode = CU_TR_FILTER_MODE_POINT,
        .flags = CU_TRSF_READ_AS_INTEGER,
    };

    CUDA_RESOURCE_DESC res_desc = {
        .resType = CU_RESOURCE_TYPE_PITCH2D,
        .res.pitch2D.format = format,
        .res.pitch2D.numChannels = channels,
        .res.pitch2D.width = src_width,
        .res.pitch2D.height = src_height,
        .res.pitch2D.pitchInBytes = src_pitch,
    };

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)next;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuLaunchKernel(func,
                                      DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1,
                                      BLOCKX, BLOCKY, 1,
                                      0, s->hwctx->stream, args, NULL));

exit:
    if (tex_prev)
        CHECK_CU(cu->cuTexObjectDestroy(tex_prev));
    if (tex_cur)
        CHECK_CU(cu->cuTexObjectDestroy(tex_cur));
    if (tex_next)
        CHECK_CU(cu->cuTexObjectDestroy(tex_next));

    return ret;
}

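/*
 * YADIFContext::filter callback: deinterlace one output frame by walking the
 * components of the software pixel format and launching one kernel per plane,
 * selected by bytes per component (1 or 2) and channels per plane (1 or 2).
 */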
static void filter(AVFilterContext *ctx, AVFrame *dst,
                   int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUcontext dummy;
    int i, ret;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
    if (ret < 0)
        return;

    for (i = 0; i < y->csp->nb_components; i++) {
        CUfunction func;
        CUarray_format format;
        int pixel_size, channels;
        const AVComponentDescriptor *comp = &y->csp->comp[i];

        if (comp->plane < i) {
            // We process planes as a whole, so don't reprocess
            // them for additional components
            continue;
        }

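        /*
         * Derive the plane layout from the component descriptor: pixel_size
         * is the number of bytes per component (depth plus any packing
         * shift), and channels is how many components share the plane
         * (e.g. 2 for the interleaved UV plane of NV12).
         */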
        pixel_size = (comp->depth + comp->shift) / 8;
        channels = comp->step / pixel_size;
        if (pixel_size > 2 || channels > 2) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        switch (pixel_size) {
        case 1:
            func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2;
            format = CU_AD_FORMAT_UNSIGNED_INT8;
            break;
        case 2:
            func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2;
            format = CU_AD_FORMAT_UNSIGNED_INT16;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        av_log(ctx, AV_LOG_TRACE,
               "Deinterlacing plane %d: pixel_size: %d channels: %d\n",
               comp->plane, pixel_size, channels);
        call_kernel(ctx, func,
                    (CUdeviceptr)y->prev->data[i],
                    (CUdeviceptr)y->cur->data[i],
                    (CUdeviceptr)y->next->data[i],
                    format, channels,
                    AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0),
                    y->cur->linesize[i],
                    (CUdeviceptr)dst->data[i],
                    AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(dst->height, i ? y->csp->log2_chroma_h : 0),
                    dst->linesize[i] / comp->step,
                    parity, tff);
    }

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    return;
}

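/*
 * Release everything config_output() and the YADIF frame history may hold:
 * unload the CUDA module under the filter's CUDA context, free the queued
 * frames and the CC FIFO, then drop the device and input-frames references.
 */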
static av_cold void deint_cuda_uninit(AVFilterContext *ctx)
{
    CUcontext dummy;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;

    if (s->hwctx && s->cu_module) {
        CudaFunctions *cu = s->hwctx->internal->cuda_dl;
        CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
        CHECK_CU(cu->cuModuleUnload(s->cu_module));
        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    av_frame_free(&y->prev);
    av_frame_free(&y->cur);
    av_frame_free(&y->next);
    ff_ccfifo_uninit(&y->cc_fifo);

    av_buffer_unref(&s->device_ref);
    s->hwctx = NULL;
    av_buffer_unref(&s->input_frames_ref);
    s->input_frames = NULL;
}

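/*
 * Input pad configuration: the filter only accepts CUDA hardware frames, so
 * it keeps a reference to the incoming AVHWFramesContext in order to pick up
 * the device and software pixel format later in config_output().
 */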
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DeintCUDAContext *s  = ctx->priv;

    if (!inlink->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!s->input_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create an input frames "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data;

    return 0;
}

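/*
 * Output pad configuration: create the output frame pool on the same CUDA
 * device as the input, set up the output timing, and load the deinterlacing
 * kernels from the embedded PTX module.
 */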
static int config_output(AVFilterLink *link)
{
    AVHWFramesContext *output_frames;
    AVFilterContext *ctx = link->src;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu;
    int ret = 0;
    CUcontext dummy;

    av_assert0(s->input_frames);
    s->device_ref = av_buffer_ref(s->input_frames->device_ref);
    if (!s->device_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create a device "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx;
    cu = s->hwctx->internal->cuda_dl;

    link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref);
    if (!link->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        ret = AVERROR(ENOMEM);
        goto exit;
    }

    output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    output_frames->format    = AV_PIX_FMT_CUDA;
    output_frames->sw_format = s->input_frames->sw_format;
    output_frames->width     = ctx->inputs[0]->w;
    output_frames->height    = ctx->inputs[0]->h;

    output_frames->initial_pool_size = 4;

    ret = ff_filter_init_hw_frames(ctx, link, 10);
    if (ret < 0)
        goto exit;

    ret = av_hwframe_ctx_init(link->hw_frames_ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame "
               "context for output: %d\n", ret);
        goto exit;
    }

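    /*
     * The output link always uses half the input time base; the advertised
     * frame rate is doubled only in the field-rate modes (mode bit 0 set),
     * where one output frame is emitted per input field.
     */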
    link->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    link->w         = ctx->inputs[0]->w;
    link->h         = ctx->inputs[0]->h;

    if (y->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});
    else
        link->frame_rate = ctx->inputs[0]->frame_rate;

    ret = ff_ccfifo_init(&y->cc_fifo, link->frame_rate, ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failure to set up CC FIFO queue\n");
        goto exit;
    }

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        ret = AVERROR(EINVAL);
        goto exit;
    }

    y->csp = av_pix_fmt_desc_get(output_frames->sw_format);
    y->filter = filter;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
    if (ret < 0)
        goto exit;

    ret = ff_cuda_load_module(ctx, s->hwctx, &s->cu_module,
                              ff_vf_yadif_cuda_ptx_data, ff_vf_yadif_cuda_ptx_len);
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "yadif_uchar"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "yadif_uchar2"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "yadif_ushort"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "yadif_ushort2"));
    if (ret < 0)
        goto exit;

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));

    return ret;
}

static const AVClass yadif_cuda_class = {
    .class_name = "yadif_cuda",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad deint_cuda_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = ff_yadif_filter_frame,
        .config_props  = config_input,
    },
};

static const AVFilterPad deint_cuda_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
};

const AVFilter ff_vf_yadif_cuda = {
    .name           = "yadif_cuda",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"),
    .priv_size      = sizeof(DeintCUDAContext),
    .priv_class     = &yadif_cuda_class,
    .uninit         = deint_cuda_uninit,
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
    FILTER_INPUTS(deint_cuda_inputs),
    FILTER_OUTPUTS(deint_cuda_outputs),
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
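/*
 * Example invocation (a minimal sketch, not part of this file): assuming an
 * FFmpeg build with CUDA and NVENC support, the filter is typically driven
 * from the ffmpeg CLI with decode, deinterlace and encode all kept on the
 * GPU. The file names and the h264_nvenc encoder choice are placeholders.
 *
 *   ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i interlaced.ts \
 *          -vf yadif_cuda=mode=send_field:parity=auto:deint=interlaced \
 *          -c:v h264_nvenc -c:a copy progressive.mp4
 */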