FFmpeg
qsvenc_hevc.c
/*
 * Intel MediaSDK QSV based HEVC encoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <sys/types.h>

#include <mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/mastering_display_metadata.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "hevc.h"
#include "hevcdec.h"
#include "h2645_parse.h"
#include "qsv.h"
#include "qsvenc.h"

enum LoadPlugin {
    LOAD_PLUGIN_NONE,
    LOAD_PLUGIN_HEVC_SW,
    LOAD_PLUGIN_HEVC_HW,
};

typedef struct QSVHEVCEncContext {
    AVClass *class;
    QSVEncContext qsv;
    int load_plugin;
} QSVHEVCEncContext;

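/*
 * The libmfx runtime may return extradata that starts at the SPS and contains
 * no VPS.  This helper parses that SPS, derives a matching VPS from its
 * fields, and prepends the escaped, startcode-prefixed VPS NAL to
 * avctx->extradata so the stored headers form a complete parameter set.
 */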
static int generate_fake_vps(QSVEncContext *q, AVCodecContext *avctx)
{
    GetByteContext gbc;
    PutByteContext pbc;

    GetBitContext gb;
    H2645RBSP sps_rbsp = { NULL };
    H2645NAL sps_nal = { NULL };
    HEVCSPS sps = { 0 };
    HEVCVPS vps = { 0 };
    uint8_t vps_buf[128], vps_rbsp_buf[128];
    uint8_t *new_extradata;
    unsigned int sps_id;
    int ret, i, type, vps_size;

    if (!avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx\n");
        return AVERROR_UNKNOWN;
    }

    av_fast_padded_malloc(&sps_rbsp.rbsp_buffer, &sps_rbsp.rbsp_buffer_alloc_size,
                          avctx->extradata_size);
    if (!sps_rbsp.rbsp_buffer)
        return AVERROR(ENOMEM);

    /* parse the SPS */
    ret = ff_h2645_extract_rbsp(avctx->extradata + 4, avctx->extradata_size - 4, &sps_rbsp, &sps_nal, 1);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error unescaping the SPS buffer\n");
        return ret;
    }

    ret = init_get_bits8(&gb, sps_nal.data, sps_nal.size);
    if (ret < 0) {
        av_freep(&sps_rbsp.rbsp_buffer);
        return ret;
    }

    get_bits(&gb, 1);
    type = get_bits(&gb, 6);
    if (type != HEVC_NAL_SPS) {
        av_log(avctx, AV_LOG_ERROR, "Unexpected NAL type in the extradata: %d\n",
               type);
        av_freep(&sps_rbsp.rbsp_buffer);
        return AVERROR_INVALIDDATA;
    }
    get_bits(&gb, 9);

    ret = ff_hevc_parse_sps(&sps, &gb, &sps_id, 0, NULL, avctx);
    av_freep(&sps_rbsp.rbsp_buffer);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error parsing the SPS\n");
        return ret;
    }

    /* generate the VPS */
    vps.vps_max_layers = 1;
    vps.vps_max_sub_layers = sps.max_sub_layers;
    vps.vps_temporal_id_nesting_flag = sps.temporal_id_nesting_flag;
    memcpy(&vps.ptl, &sps.ptl, sizeof(vps.ptl));
    vps.vps_sub_layer_ordering_info_present_flag = 1;
    for (i = 0; i < HEVC_MAX_SUB_LAYERS; i++) {
        vps.vps_max_dec_pic_buffering[i] = sps.temporal_layer[i].max_dec_pic_buffering;
        vps.vps_num_reorder_pics[i]      = sps.temporal_layer[i].num_reorder_pics;
        vps.vps_max_latency_increase[i]  = sps.temporal_layer[i].max_latency_increase;
    }

    vps.vps_num_layer_sets                  = 1;
    vps.vps_timing_info_present_flag        = sps.vui.vui_timing_info_present_flag;
    vps.vps_num_units_in_tick               = sps.vui.vui_num_units_in_tick;
    vps.vps_time_scale                      = sps.vui.vui_time_scale;
    vps.vps_poc_proportional_to_timing_flag = sps.vui.vui_poc_proportional_to_timing_flag;
    vps.vps_num_ticks_poc_diff_one          = sps.vui.vui_num_ticks_poc_diff_one_minus1 + 1;
    vps.vps_num_hrd_parameters              = 0;

    /* generate the encoded RBSP form of the VPS */
    ret = ff_hevc_encode_nal_vps(&vps, sps.vps_id, vps_rbsp_buf, sizeof(vps_rbsp_buf));
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error writing the VPS\n");
        return ret;
    }

    /* escape and add the startcode */
    bytestream2_init(&gbc, vps_rbsp_buf, ret);
    bytestream2_init_writer(&pbc, vps_buf, sizeof(vps_buf));

    bytestream2_put_be32(&pbc, 1);                 // startcode
    bytestream2_put_byte(&pbc, HEVC_NAL_VPS << 1); // NAL
    bytestream2_put_byte(&pbc, 1);                 // header

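    /* Insert an emulation prevention byte (0x03) whenever two zero bytes are
     * followed by a byte <= 0x03, as required for the Annex B byte stream. */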
    while (bytestream2_get_bytes_left(&gbc)) {
        if (bytestream2_get_bytes_left(&gbc) >= 3 && bytestream2_peek_be24(&gbc) <= 3) {
            bytestream2_put_be24(&pbc, 3);
            bytestream2_skip(&gbc, 2);
        } else
            bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
    }

    vps_size = bytestream2_tell_p(&pbc);
    new_extradata = av_mallocz(vps_size + avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!new_extradata)
        return AVERROR(ENOMEM);
    memcpy(new_extradata, vps_buf, vps_size);
    memcpy(new_extradata + vps_size, avctx->extradata, avctx->extradata_size);

    av_freep(&avctx->extradata);
    avctx->extradata       = new_extradata;
    avctx->extradata_size += vps_size;

    return 0;
}

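/*
 * Per-frame encode control: when the frame carries mastering-display or
 * content-light-level side data (and the runtime is new enough), translate it
 * into mfxExtMasteringDisplayColourVolume / mfxExtContentLightLevelInfo ext
 * buffers so the encoder can emit the corresponding HDR SEI messages.
 */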
static int qsv_hevc_set_encode_ctrl(AVCodecContext *avctx,
                                    const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
{
    QSVHEVCEncContext *q = avctx->priv_data;
    AVFrameSideData *sd;

    if (!frame || !QSV_RUNTIME_VERSION_ATLEAST(q->qsv.ver, 1, 25))
        return 0;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
    if (sd) {
        AVMasteringDisplayMetadata *mdm = (AVMasteringDisplayMetadata *)sd->data;

        // SEI is needed when both the primaries and luminance are set
        if (mdm->has_primaries && mdm->has_luminance) {
            const int mapping[3] = {1, 2, 0};
            const int chroma_den = 50000;
            const int luma_den = 10000;
            int i;
            mfxExtMasteringDisplayColourVolume *mdcv = av_mallocz(sizeof(mfxExtMasteringDisplayColourVolume));

            if (!mdcv)
                return AVERROR(ENOMEM);

            mdcv->Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
            mdcv->Header.BufferSz = sizeof(*mdcv);

            for (i = 0; i < 3; i++) {
                const int j = mapping[i];

                mdcv->DisplayPrimariesX[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(mdm->display_primaries[j][0])),
                          chroma_den);
                mdcv->DisplayPrimariesY[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(mdm->display_primaries[j][1])),
                          chroma_den);
            }

            mdcv->WhitePointX =
                FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[0])),
                      chroma_den);
            mdcv->WhitePointY =
                FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[1])),
                      chroma_den);

            mdcv->MaxDisplayMasteringLuminance =
                lrint(luma_den * av_q2d(mdm->max_luminance));
            mdcv->MinDisplayMasteringLuminance =
                FFMIN(lrint(luma_den * av_q2d(mdm->min_luminance)),
                      mdcv->MaxDisplayMasteringLuminance);

            enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)mdcv;
        }
    }

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    if (sd) {
        AVContentLightMetadata *clm = (AVContentLightMetadata *)sd->data;
        mfxExtContentLightLevelInfo *clli = av_mallocz(sizeof(mfxExtContentLightLevelInfo));

        if (!clli)
            return AVERROR(ENOMEM);

        clli->Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
        clli->Header.BufferSz = sizeof(*clli);

        clli->MaxContentLightLevel    = FFMIN(clm->MaxCLL,  65535);
        clli->MaxPicAverageLightLevel = FFMIN(clm->MaxFALL, 65535);

        enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)clli;
    }

    return 0;
}

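/*
 * Encoder init: choose which HEVC plugin UID to load (unless load_plugins was
 * set explicitly), shift idr_interval to the H.264-style convention used by
 * the common QSV code, install the HDR side-data callback, and synthesize a
 * VPS when the runtime does not provide one in the extradata.
 */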
static av_cold int qsv_enc_init(AVCodecContext *avctx)
{
    QSVHEVCEncContext *q = avctx->priv_data;
    int ret;

    if (q->load_plugin != LOAD_PLUGIN_NONE) {
        static const char * const uid_hevcenc_sw = "2fca99749fdb49aeb121a5b63ef568f7";
        static const char * const uid_hevcenc_hw = "6fadc791a0c2eb479ab6dcd5ea9da347";

        if (q->qsv.load_plugins[0]) {
            av_log(avctx, AV_LOG_WARNING,
                   "load_plugins is not empty, but load_plugin is not set to "
                   "'none'. The load_plugin value will be ignored.\n");
        } else {
            av_freep(&q->qsv.load_plugins);

            if (q->load_plugin == LOAD_PLUGIN_HEVC_SW)
                q->qsv.load_plugins = av_strdup(uid_hevcenc_sw);
            else
                q->qsv.load_plugins = av_strdup(uid_hevcenc_hw);

            if (!q->qsv.load_plugins)
                return AVERROR(ENOMEM);
        }
    }

    // HEVC and H264 meaning of the value is shifted by 1, make it consistent
    q->qsv.idr_interval++;

    q->qsv.set_encode_ctrl_cb = qsv_hevc_set_encode_ctrl;

    ret = ff_qsv_enc_init(avctx, &q->qsv);
    if (ret < 0)
        return ret;

    if (!q->qsv.hevc_vps) {
        ret = generate_fake_vps(&q->qsv, avctx);
        if (ret < 0) {
            ff_qsv_enc_close(avctx, &q->qsv);
            return ret;
        }
    }

    return 0;
}

static int qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *frame, int *got_packet)
{
    QSVHEVCEncContext *q = avctx->priv_data;

    return ff_qsv_encode(avctx, &q->qsv, pkt, frame, got_packet);
}

static av_cold int qsv_enc_close(AVCodecContext *avctx)
{
    QSVHEVCEncContext *q = avctx->priv_data;

    return ff_qsv_enc_close(avctx, &q->qsv);
}

#define OFFSET(x) offsetof(QSVHEVCEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
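/* The QSV_* macros pulled in below are defined in qsvenc.h and expand to the
 * rate-control and tuning options shared by all QSV encoders; the explicit
 * entries that follow are HEVC-specific. */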
static const AVOption options[] = {
    QSV_COMMON_OPTS
    QSV_OPTION_RDO
    QSV_OPTION_MAX_FRAME_SIZE
    QSV_OPTION_MAX_SLICE_SIZE
    QSV_OPTION_MBBRC
    QSV_OPTION_EXTBRC
    QSV_OPTION_ADAPTIVE_I
    QSV_OPTION_ADAPTIVE_B
    QSV_OPTION_P_STRATEGY
    QSV_OPTION_B_STRATEGY
    QSV_OPTION_DBLK_IDC
    QSV_OPTION_LOW_DELAY_BRC
    QSV_OPTION_MAX_MIN_QP
    QSV_OPTION_SCENARIO
    QSV_OPTION_AVBR
    QSV_OPTION_SKIP_FRAME
#if QSV_HAVE_HE
    QSV_HE_OPTIONS
#endif

    { "idr_interval", "Distance (in I-frames) between IDR frames", OFFSET(qsv.idr_interval), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT_MAX, VE, "idr_interval" },
    { "begin_only", "Output an IDR-frame only at the beginning of the stream", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, 0, 0, VE, "idr_interval" },
    { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VE, "load_plugin" },
    { "none",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE },    0, 0, VE, "load_plugin" },
    { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VE, "load_plugin" },
    { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VE, "load_plugin" },

    { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
        OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VE },

    { "look_ahead_depth", "Depth of look ahead in number of frames, available when extbrc option is enabled", OFFSET(qsv.look_ahead_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, VE },
    { "profile", NULL, OFFSET(qsv.profile), AV_OPT_TYPE_INT, { .i64 = MFX_PROFILE_UNKNOWN }, 0, INT_MAX, VE, "profile" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_UNKNOWN     }, INT_MIN, INT_MAX, VE, "profile" },
    { "main",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
    { "main10",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
    { "mainsp",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAINSP }, INT_MIN, INT_MAX, VE, "profile" },
    { "rext",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_REXT   }, INT_MIN, INT_MAX, VE, "profile" },
#if QSV_VERSION_ATLEAST(1, 32)
    { "scc",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_SCC    }, INT_MIN, INT_MAX, VE, "profile" },
#endif
    { "tier", "Set the encoding tier (only level >= 4 can support high tier)", OFFSET(qsv.tier), AV_OPT_TYPE_INT, { .i64 = MFX_TIER_HEVC_HIGH }, MFX_TIER_HEVC_MAIN, MFX_TIER_HEVC_HIGH, VE, "tier" },
    { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_MAIN }, INT_MIN, INT_MAX, VE, "tier" },
    { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_HIGH }, INT_MIN, INT_MAX, VE, "tier" },

    { "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },

    { "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
    { "tile_rows", "Number of rows for tiled encoding",    OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
    { "recovery_point_sei", "Insert recovery point SEI messages", OFFSET(qsv.recovery_point_sei), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
    { "aud", "Insert the Access Unit Delimiter NAL", OFFSET(qsv.aud), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "pic_timing_sei", "Insert picture timing SEI with pic_struct_syntax element", OFFSET(qsv.pic_timing_sei), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
    { "transform_skip", "Turn this option ON to enable transformskip", OFFSET(qsv.transform_skip), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
    { "int_ref_type", "Intra refresh type. B frames should be set to 0", OFFSET(qsv.int_ref_type), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE, "int_ref_type" },
    { "none",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags = VE, "int_ref_type" },
    { "vertical",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags = VE, "int_ref_type" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, .flags = VE, "int_ref_type" },
    { "slice",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, .flags = VE, "int_ref_type" },
    { "int_ref_cycle_size", "Number of frames in the intra refresh cycle", OFFSET(qsv.int_ref_cycle_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE },
    { "int_ref_qp_delta", "QP difference for the refresh MBs", OFFSET(qsv.int_ref_qp_delta), AV_OPT_TYPE_INT, { .i64 = INT16_MIN }, INT16_MIN, INT16_MAX, VE },
    { "int_ref_cycle_dist", "Distance between the beginnings of the intra-refresh cycles in frames", OFFSET(qsv.int_ref_cycle_dist), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT16_MAX, VE },

    { NULL },
};

static const AVClass class = {
    .class_name = "hevc_qsv encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const FFCodecDefault qsv_enc_defaults[] = {
    { "b",       "1M" },
    { "refs",    "0"  },
    { "g",       "-1" },
    { "bf",      "-1" },
    { "qmin",    "-1" },
    { "qmax",    "-1" },
    { "trellis", "-1" },
    { NULL },
};

const FFCodec ff_hevc_qsv_encoder = {
    .p.name         = "hevc_qsv",
    CODEC_LONG_NAME("HEVC (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVHEVCEncContext),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_HEVC,
    .init           = qsv_enc_init,
    FF_CODEC_ENCODE_CB(qsv_enc_frame),
    .close          = qsv_enc_close,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
    .p.pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
                                                    AV_PIX_FMT_P010,
                                                    AV_PIX_FMT_P012,
                                                    AV_PIX_FMT_YUYV422,
                                                    AV_PIX_FMT_Y210,
                                                    AV_PIX_FMT_QSV,
                                                    AV_PIX_FMT_BGRA,
                                                    AV_PIX_FMT_X2RGB10,
                                                    AV_PIX_FMT_VUYX,
                                                    AV_PIX_FMT_XV30,
                                                    AV_PIX_FMT_NONE },
    .p.priv_class   = &class,
    .defaults       = qsv_enc_defaults,
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .p.wrapper_name = "qsv",
    .hw_configs     = ff_qsv_enc_hw_configs,
};