vf_transpose_vaapi.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "transpose.h"
#include "vaapi_vpp.h"

typedef struct TransposeVAAPIContext {
    VAAPIVPPContext vpp_ctx; // must be the first field
    int passthrough;         // PassthroughType, landscape passthrough mode enabled
    int dir;                 // TransposeDir

    int rotation_state;
    int mirror_state;
} TransposeVAAPIContext;

static int transpose_vaapi_build_filter_params(AVFilterContext *avctx)
{
    VAAPIVPPContext *vpp_ctx   = avctx->priv;
    TransposeVAAPIContext *ctx = avctx->priv;
    VAStatus vas;
    int support_flag;
    VAProcPipelineCaps pipeline_caps;

    memset(&pipeline_caps, 0, sizeof(pipeline_caps));
    vas = vaQueryVideoProcPipelineCaps(vpp_ctx->hwctx->display,
                                       vpp_ctx->va_context,
                                       NULL, 0,
                                       &pipeline_caps);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline "
               "caps: %d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    if (!pipeline_caps.rotation_flags) {
        av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support transpose\n");
        return AVERROR(EINVAL);
    }

    switch (ctx->dir) {
    case TRANSPOSE_CCLOCK_FLIP:
        ctx->rotation_state = VA_ROTATION_270;
        ctx->mirror_state   = VA_MIRROR_VERTICAL;
        break;
    case TRANSPOSE_CLOCK:
        ctx->rotation_state = VA_ROTATION_90;
        ctx->mirror_state   = VA_MIRROR_NONE;
        break;
    case TRANSPOSE_CCLOCK:
        ctx->rotation_state = VA_ROTATION_270;
        ctx->mirror_state   = VA_MIRROR_NONE;
        break;
    case TRANSPOSE_CLOCK_FLIP:
        ctx->rotation_state = VA_ROTATION_90;
        ctx->mirror_state   = VA_MIRROR_VERTICAL;
        break;
    case TRANSPOSE_REVERSAL:
        ctx->rotation_state = VA_ROTATION_180;
        ctx->mirror_state   = VA_MIRROR_NONE;
        break;
    case TRANSPOSE_HFLIP:
        ctx->rotation_state = VA_ROTATION_NONE;
        ctx->mirror_state   = VA_MIRROR_HORIZONTAL;
        break;
    case TRANSPOSE_VFLIP:
        ctx->rotation_state = VA_ROTATION_NONE;
        ctx->mirror_state   = VA_MIRROR_VERTICAL;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Failed to set direction to %d\n", ctx->dir);
        return AVERROR(EINVAL);
    }

    if (VA_ROTATION_NONE != ctx->rotation_state) {
        support_flag = pipeline_caps.rotation_flags & (1 << ctx->rotation_state);
        if (!support_flag) {
            av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support rotation %d\n",
                   ctx->rotation_state);
            return AVERROR(EINVAL);
        }
    }

    if (VA_MIRROR_NONE != ctx->mirror_state) {
        support_flag = pipeline_caps.mirror_flags & ctx->mirror_state;
        if (!support_flag) {
            av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support mirror %d\n",
                   ctx->mirror_state);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}
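
The two checks above rely on how VAProcPipelineCaps encodes driver capabilities: rotation_flags is a bitmask indexed by VA_ROTATION_* values (bit n set means rotation n is supported), while mirror_flags is already a combination of VA_MIRROR_* bit flags, so it is tested directly. The following is a minimal standalone sketch of that check; the helper name rotation_mirror_supported is invented for this note and is not part of this file or of libva.

/* Illustrative sketch only: non-zero if the driver advertises support for
 * the requested rotation/mirror combination. */
static int rotation_mirror_supported(const VAProcPipelineCaps *caps,
                                     int rotation, int mirror)
{
    if (rotation != VA_ROTATION_NONE &&
        !(caps->rotation_flags & (1 << rotation)))  /* e.g. VA_ROTATION_90 is index 1 */
        return 0;
    if (mirror != VA_MIRROR_NONE &&
        !(caps->mirror_flags & mirror))             /* VA_MIRROR_* values are bit flags */
        return 0;
    return 1;
}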

static int transpose_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext *avctx     = inlink->dst;
    AVFilterLink *outlink      = avctx->outputs[0];
    VAAPIVPPContext *vpp_ctx   = avctx->priv;
    TransposeVAAPIContext *ctx = avctx->priv;
    AVFrame *output_frame      = NULL;
    VAProcPipelineParameterBuffer params;
    int err;

    if (ctx->passthrough)
        return ff_filter_frame(outlink, input_frame);

    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input_frame->format),
           input_frame->width, input_frame->height, input_frame->pts);

    if (vpp_ctx->va_context == VA_INVALID_ID)
        return AVERROR(EINVAL);

    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,
                                       vpp_ctx->output_height);
    if (!output_frame) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_frame_copy_props(output_frame, input_frame);
    if (err < 0)
        goto fail;

    err = ff_vaapi_vpp_init_params(avctx, &params,
                                   input_frame, output_frame);
    if (err < 0)
        goto fail;

    params.rotation_state = ctx->rotation_state;
    params.mirror_state   = ctx->mirror_state;

    err = ff_vaapi_vpp_render_picture(avctx, &params, output_frame);
    if (err < 0)
        goto fail;

    av_frame_free(&input_frame);

    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output_frame->format),
           output_frame->width, output_frame->height, output_frame->pts);

    return ff_filter_frame(outlink, output_frame);

fail:
    av_frame_free(&input_frame);
    av_frame_free(&output_frame);
    return err;
}

static av_cold int transpose_vaapi_init(AVFilterContext *avctx)
{
    VAAPIVPPContext *vpp_ctx = avctx->priv;

    ff_vaapi_vpp_ctx_init(avctx);
    vpp_ctx->build_filter_params = transpose_vaapi_build_filter_params;
    vpp_ctx->pipeline_uninit     = ff_vaapi_vpp_pipeline_uninit;
    vpp_ctx->output_format       = AV_PIX_FMT_NONE;

    return 0;
}

static int transpose_vaapi_vpp_config_output(AVFilterLink *outlink)
{
    AVFilterContext *avctx     = outlink->src;
    VAAPIVPPContext *vpp_ctx   = avctx->priv;
    TransposeVAAPIContext *ctx = avctx->priv;
    AVFilterLink *inlink       = avctx->inputs[0];

    if ((inlink->w >= inlink->h && ctx->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
        (inlink->w <= inlink->h && ctx->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
        outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
        if (!outlink->hw_frames_ctx)
            return AVERROR(ENOMEM);
        av_log(avctx, AV_LOG_VERBOSE,
               "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
               inlink->w, inlink->h, inlink->w, inlink->h);
        return 0;
    }

    ctx->passthrough = TRANSPOSE_PT_TYPE_NONE;

    switch (ctx->dir) {
    case TRANSPOSE_CCLOCK_FLIP:
    case TRANSPOSE_CCLOCK:
    case TRANSPOSE_CLOCK:
    case TRANSPOSE_CLOCK_FLIP:
        vpp_ctx->output_width  = avctx->inputs[0]->h;
        vpp_ctx->output_height = avctx->inputs[0]->w;
        av_log(avctx, AV_LOG_DEBUG, "swap width and height for clock/cclock rotation\n");
        break;
    default:
        break;
    }

    return ff_vaapi_vpp_config_output(outlink);
}
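
The passthrough branch above compares the input geometry against the requested passthrough mode; read on its own, the test is the predicate sketched below. The helper name should_pass_through is invented for this note and does not exist in the file.

/* Sketch only: with passthrough=landscape a frame at least as wide as it is
 * tall is forwarded untouched; with passthrough=portrait the same applies to
 * frames at least as tall as they are wide. Square frames satisfy either. */
static int should_pass_through(int w, int h, int passthrough)
{
    return (w >= h && passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
           (w <= h && passthrough == TRANSPOSE_PT_TYPE_PORTRAIT);
}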

static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    TransposeVAAPIContext *ctx = inlink->dst->priv;

    return ctx->passthrough ?
        ff_null_get_video_buffer(inlink, w, h) :
        ff_default_get_video_buffer(inlink, w, h);
}

#define OFFSET(x) offsetof(TransposeVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
static const AVOption transpose_vaapi_options[] = {
    { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, "dir" },
        { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
        { "clock",       "rotate clockwise",                            0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK       }, .flags=FLAGS, .unit = "dir" },
        { "cclock",      "rotate counter-clockwise",                    0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK      }, .flags=FLAGS, .unit = "dir" },
        { "clock_flip",  "rotate clockwise with vertical flip",         0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP  }, .flags=FLAGS, .unit = "dir" },
        { "reversal",    "rotate by half-turn",                         0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL    }, .flags=FLAGS, .unit = "dir" },
        { "hflip",       "flip horizontally",                           0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP       }, .flags=FLAGS, .unit = "dir" },
        { "vflip",       "flip vertically",                             0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP       }, .flags=FLAGS, .unit = "dir" },

    { "passthrough", "do not apply transposition if the input matches the specified geometry",
      OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
        { "none",      "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE},      INT_MIN, INT_MAX, FLAGS, "passthrough" },
        { "portrait",  "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT},  INT_MIN, INT_MAX, FLAGS, "passthrough" },
        { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },

    { NULL }
};
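
As a usage note (an illustration, not part of this file): on a system with working VAAPI decode and encode, the options above are typically exercised from the command line along these lines, with the exact device setup depending on the machine:

    ffmpeg -hwaccel vaapi -hwaccel_output_format vaapi -i input.mp4 \
           -vf 'transpose_vaapi=dir=clock:passthrough=landscape' \
           -c:v h264_vaapi output.mp4

With this combination, portrait input is rotated clockwise on the GPU, while input that is already landscape passes through untouched.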

AVFILTER_DEFINE_CLASS(transpose_vaapi);

static const AVFilterPad transpose_vaapi_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .filter_frame     = &transpose_vaapi_filter_frame,
        .get_video_buffer = get_video_buffer,
        .config_props     = &ff_vaapi_vpp_config_input,
    },
    { NULL }
};

static const AVFilterPad transpose_vaapi_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &transpose_vaapi_vpp_config_output,
    },
    { NULL }
};

AVFilter ff_vf_transpose_vaapi = {
    .name           = "transpose_vaapi",
    .description    = NULL_IF_CONFIG_SMALL("VAAPI VPP for transpose"),
    .priv_size      = sizeof(TransposeVAAPIContext),
    .init           = &transpose_vaapi_init,
    .uninit         = &ff_vaapi_vpp_ctx_uninit,
    .query_formats  = &ff_vaapi_vpp_query_formats,
    .inputs         = transpose_vaapi_inputs,
    .outputs        = transpose_vaapi_outputs,
    .priv_class     = &transpose_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};