FFmpeg
qsvenc_hevc.c
Go to the documentation of this file.
1 /*
2  * Intel MediaSDK QSV based HEVC encoder
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 
#include <stdint.h>
#include <sys/types.h>

#include <mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "hevc.h"
#include "hevcdec.h"
#include "h2645_parse.h"
#include "qsv.h"
#include "qsvenc.h"
41 
/* Which MFX HEVC encoder plugin to request from the MSDK dispatcher. */
enum LoadPlugin {
    LOAD_PLUGIN_NONE,     ///< do not load any plugin explicitly
    LOAD_PLUGIN_HEVC_SW,  ///< software HEVC encoder plugin
    LOAD_PLUGIN_HEVC_HW,  ///< hardware HEVC encoder plugin
};
47 
48 typedef struct QSVHEVCEncContext {
49  AVClass *class;
53 
55 {
56  GetByteContext gbc;
57  PutByteContext pbc;
58 
59  GetBitContext gb;
60  H2645RBSP sps_rbsp = { NULL };
61  H2645NAL sps_nal = { NULL };
62  HEVCSPS sps = { 0 };
63  HEVCVPS vps = { 0 };
64  uint8_t vps_buf[128], vps_rbsp_buf[128];
65  uint8_t *new_extradata;
66  unsigned int sps_id;
67  int ret, i, type, vps_size;
68 
69  if (!avctx->extradata_size) {
70  av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx\n");
71  return AVERROR_UNKNOWN;
72  }
73 
75  if (!sps_rbsp.rbsp_buffer)
76  return AVERROR(ENOMEM);
77 
78  /* parse the SPS */
79  ret = ff_h2645_extract_rbsp(avctx->extradata + 4, avctx->extradata_size - 4, &sps_rbsp, &sps_nal, 1);
80  if (ret < 0) {
81  av_log(avctx, AV_LOG_ERROR, "Error unescaping the SPS buffer\n");
82  return ret;
83  }
84 
85  ret = init_get_bits8(&gb, sps_nal.data, sps_nal.size);
86  if (ret < 0) {
87  av_freep(&sps_rbsp.rbsp_buffer);
88  return ret;
89  }
90 
91  get_bits(&gb, 1);
92  type = get_bits(&gb, 6);
93  if (type != HEVC_NAL_SPS) {
94  av_log(avctx, AV_LOG_ERROR, "Unexpected NAL type in the extradata: %d\n",
95  type);
96  av_freep(&sps_rbsp.rbsp_buffer);
97  return AVERROR_INVALIDDATA;
98  }
99  get_bits(&gb, 9);
100 
101  ret = ff_hevc_parse_sps(&sps, &gb, &sps_id, 0, NULL, avctx);
102  av_freep(&sps_rbsp.rbsp_buffer);
103  if (ret < 0) {
104  av_log(avctx, AV_LOG_ERROR, "Error parsing the SPS\n");
105  return ret;
106  }
107 
108  /* generate the VPS */
109  vps.vps_max_layers = 1;
110  vps.vps_max_sub_layers = sps.max_sub_layers;
111  vps.vps_temporal_id_nesting_flag = sps.temporal_id_nesting_flag;
112  memcpy(&vps.ptl, &sps.ptl, sizeof(vps.ptl));
113  vps.vps_sub_layer_ordering_info_present_flag = 1;
114  for (i = 0; i < HEVC_MAX_SUB_LAYERS; i++) {
115  vps.vps_max_dec_pic_buffering[i] = sps.temporal_layer[i].max_dec_pic_buffering;
116  vps.vps_num_reorder_pics[i] = sps.temporal_layer[i].num_reorder_pics;
117  vps.vps_max_latency_increase[i] = sps.temporal_layer[i].max_latency_increase;
118  }
119 
120  vps.vps_num_layer_sets = 1;
121  vps.vps_timing_info_present_flag = sps.vui.vui_timing_info_present_flag;
122  vps.vps_num_units_in_tick = sps.vui.vui_num_units_in_tick;
123  vps.vps_time_scale = sps.vui.vui_time_scale;
124  vps.vps_poc_proportional_to_timing_flag = sps.vui.vui_poc_proportional_to_timing_flag;
125  vps.vps_num_ticks_poc_diff_one = sps.vui.vui_num_ticks_poc_diff_one_minus1 + 1;
126  vps.vps_num_hrd_parameters = 0;
127 
128  /* generate the encoded RBSP form of the VPS */
129  ret = ff_hevc_encode_nal_vps(&vps, sps.vps_id, vps_rbsp_buf, sizeof(vps_rbsp_buf));
130  if (ret < 0) {
131  av_log(avctx, AV_LOG_ERROR, "Error writing the VPS\n");
132  return ret;
133  }
134 
135  /* escape and add the startcode */
136  bytestream2_init(&gbc, vps_rbsp_buf, ret);
137  bytestream2_init_writer(&pbc, vps_buf, sizeof(vps_buf));
138 
139  bytestream2_put_be32(&pbc, 1); // startcode
140  bytestream2_put_byte(&pbc, HEVC_NAL_VPS << 1); // NAL
141  bytestream2_put_byte(&pbc, 1); // header
142 
143  while (bytestream2_get_bytes_left(&gbc)) {
144  if (bytestream2_get_bytes_left(&gbc) >= 3 && bytestream2_peek_be24(&gbc) <= 3) {
145  bytestream2_put_be24(&pbc, 3);
146  bytestream2_skip(&gbc, 2);
147  } else
148  bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
149  }
150 
151  vps_size = bytestream2_tell_p(&pbc);
152  new_extradata = av_mallocz(vps_size + avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
153  if (!new_extradata)
154  return AVERROR(ENOMEM);
155  memcpy(new_extradata, vps_buf, vps_size);
156  memcpy(new_extradata + vps_size, avctx->extradata, avctx->extradata_size);
157 
158  av_freep(&avctx->extradata);
159  avctx->extradata = new_extradata;
160  avctx->extradata_size += vps_size;
161 
162  return 0;
163 }
164 
166  const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
167 {
168  QSVHEVCEncContext *q = avctx->priv_data;
169  AVFrameSideData *sd;
170 
171  if (!frame || !QSV_RUNTIME_VERSION_ATLEAST(q->qsv.ver, 1, 25))
172  return 0;
173 
175  if (sd) {
177 
178  // SEI is needed when both the primaries and luminance are set
179  if (mdm->has_primaries && mdm->has_luminance) {
180  const int mapping[3] = {1, 2, 0};
181  const int chroma_den = 50000;
182  const int luma_den = 10000;
183  int i;
184  mfxExtMasteringDisplayColourVolume *mdcv = av_mallocz(sizeof(mfxExtMasteringDisplayColourVolume));
185 
186  if (!mdcv)
187  return AVERROR(ENOMEM);
188 
189  mdcv->Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
190  mdcv->Header.BufferSz = sizeof(*mdcv);
191 
192  for (i = 0; i < 3; i++) {
193  const int j = mapping[i];
194 
195  mdcv->DisplayPrimariesX[i] =
196  FFMIN(lrint(chroma_den *
197  av_q2d(mdm->display_primaries[j][0])),
198  chroma_den);
199  mdcv->DisplayPrimariesY[i] =
200  FFMIN(lrint(chroma_den *
201  av_q2d(mdm->display_primaries[j][1])),
202  chroma_den);
203  }
204 
205  mdcv->WhitePointX =
206  FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[0])),
207  chroma_den);
208  mdcv->WhitePointY =
209  FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[1])),
210  chroma_den);
211 
212  mdcv->MaxDisplayMasteringLuminance =
213  lrint(luma_den * av_q2d(mdm->max_luminance));
214  mdcv->MinDisplayMasteringLuminance =
215  FFMIN(lrint(luma_den * av_q2d(mdm->min_luminance)),
216  mdcv->MaxDisplayMasteringLuminance);
217 
218  enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)mdcv;
219  }
220  }
221 
223  if (sd) {
225  mfxExtContentLightLevelInfo * clli = av_mallocz(sizeof(mfxExtContentLightLevelInfo));
226 
227  if (!clli)
228  return AVERROR(ENOMEM);
229 
230  clli->Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
231  clli->Header.BufferSz = sizeof(*clli);
232 
233  clli->MaxContentLightLevel = FFMIN(clm->MaxCLL, 65535);
234  clli->MaxPicAverageLightLevel = FFMIN(clm->MaxFALL, 65535);
235 
236  enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)clli;
237  }
238 
239  return 0;
240 }
241 
243 {
244  QSVHEVCEncContext *q = avctx->priv_data;
245  int ret;
246 
247  if (q->load_plugin != LOAD_PLUGIN_NONE) {
248  static const char * const uid_hevcenc_sw = "2fca99749fdb49aeb121a5b63ef568f7";
249  static const char * const uid_hevcenc_hw = "6fadc791a0c2eb479ab6dcd5ea9da347";
250 
251  if (q->qsv.load_plugins[0]) {
252  av_log(avctx, AV_LOG_WARNING,
253  "load_plugins is not empty, but load_plugin is not set to 'none'."
254  "The load_plugin value will be ignored.\n");
255  } else {
257 
259  q->qsv.load_plugins = av_strdup(uid_hevcenc_sw);
260  else
261  q->qsv.load_plugins = av_strdup(uid_hevcenc_hw);
262 
263  if (!q->qsv.load_plugins)
264  return AVERROR(ENOMEM);
265  }
266  }
267 
268  // HEVC and H264 meaning of the value is shifted by 1, make it consistent
269  q->qsv.idr_interval++;
270 
272 
273  ret = ff_qsv_enc_init(avctx, &q->qsv);
274  if (ret < 0)
275  return ret;
276 
277  if (!q->qsv.hevc_vps) {
278  ret = generate_fake_vps(&q->qsv, avctx);
279  if (ret < 0) {
280  ff_qsv_enc_close(avctx, &q->qsv);
281  return ret;
282  }
283  }
284 
285  return 0;
286 }
287 
289  const AVFrame *frame, int *got_packet)
290 {
291  QSVHEVCEncContext *q = avctx->priv_data;
292 
293  return ff_qsv_encode(avctx, &q->qsv, pkt, frame, got_packet);
294 }
295 
297 {
298  QSVHEVCEncContext *q = avctx->priv_data;
299 
300  return ff_qsv_enc_close(avctx, &q->qsv);
301 }
302 
303 #define OFFSET(x) offsetof(QSVHEVCEncContext, x)
304 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
305 static const AVOption options[] = {
322 #if QSV_HAVE_HE
323  QSV_HE_OPTIONS
324 #endif
325 
326  { "idr_interval", "Distance (in I-frames) between IDR frames", OFFSET(qsv.idr_interval), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT_MAX, VE, .unit = "idr_interval" },
327  { "begin_only", "Output an IDR-frame only at the beginning of the stream", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, 0, 0, VE, .unit = "idr_interval" },
328  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VE, .unit = "load_plugin" },
329  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VE, .unit = "load_plugin" },
330  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VE, .unit = "load_plugin" },
331  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VE, .unit = "load_plugin" },
332 
333  { "load_plugins", "A :-separate list of hexadecimal plugin UIDs to load in an internal session",
334  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VE },
335 
336  { "look_ahead_depth", "Depth of look ahead in number frames, available when extbrc option is enabled", OFFSET(qsv.look_ahead_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, VE },
337  { "profile", NULL, OFFSET(qsv.profile), AV_OPT_TYPE_INT, { .i64 = MFX_PROFILE_UNKNOWN }, 0, INT_MAX, VE, .unit = "profile" },
338  { "unknown", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
339  { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
340  { "main10", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN10 }, INT_MIN, INT_MAX, VE, .unit = "profile" },
341  { "mainsp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAINSP }, INT_MIN, INT_MAX, VE, .unit = "profile" },
342  { "rext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_REXT }, INT_MIN, INT_MAX, VE, .unit = "profile" },
343 #if QSV_VERSION_ATLEAST(1, 32)
344  { "scc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_SCC }, INT_MIN, INT_MAX, VE, .unit = "profile" },
345 #endif
346  { "tier", "Set the encoding tier (only level >= 4 can support high tier)", OFFSET(qsv.tier), AV_OPT_TYPE_INT, { .i64 = MFX_TIER_HEVC_HIGH }, MFX_TIER_HEVC_MAIN, MFX_TIER_HEVC_HIGH, VE, .unit = "tier" },
347  { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_MAIN }, INT_MIN, INT_MAX, VE, .unit = "tier" },
348  { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_HIGH }, INT_MIN, INT_MAX, VE, .unit = "tier" },
349 
350  { "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE},
351 
352  { "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
353  { "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
354  { "recovery_point_sei", "Insert recovery point SEI messages", OFFSET(qsv.recovery_point_sei), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
355  { "aud", "Insert the Access Unit Delimiter NAL", OFFSET(qsv.aud), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
356  { "pic_timing_sei", "Insert picture timing SEI with pic_struct_syntax element", OFFSET(qsv.pic_timing_sei), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
357  { "transform_skip", "Turn this option ON to enable transformskip", OFFSET(qsv.transform_skip), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 1, VE},
358  { "int_ref_type", "Intra refresh type. B frames should be set to 0", OFFSET(qsv.int_ref_type), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE, .unit = "int_ref_type" },
359  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags = VE, .unit = "int_ref_type" },
360  { "vertical", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags = VE, .unit = "int_ref_type" },
361  { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, .flags = VE, .unit = "int_ref_type" },
362  { "slice" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, .flags = VE, .unit = "int_ref_type" },
363  { "int_ref_cycle_size", "Number of frames in the intra refresh cycle", OFFSET(qsv.int_ref_cycle_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE },
364  { "int_ref_qp_delta", "QP difference for the refresh MBs", OFFSET(qsv.int_ref_qp_delta), AV_OPT_TYPE_INT, { .i64 = INT16_MIN }, INT16_MIN, INT16_MAX, VE },
365  { "int_ref_cycle_dist", "Distance between the beginnings of the intra-refresh cycles in frames", OFFSET(qsv.int_ref_cycle_dist), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT16_MAX, VE },
366 
367  { NULL },
368 };
369 
370 static const AVClass class = {
371  .class_name = "hevc_qsv encoder",
372  .item_name = av_default_item_name,
373  .option = options,
375 };
376 
378  { "b", "0" },
379  { "refs", "0" },
380  { "g", "248" },
381  { "bf", "-1" },
382  { "qmin", "-1" },
383  { "qmax", "-1" },
384  { "trellis", "-1" },
385  { NULL },
386 };
387 
389  .p.name = "hevc_qsv",
390  CODEC_LONG_NAME("HEVC (Intel Quick Sync Video acceleration)"),
391  .priv_data_size = sizeof(QSVHEVCEncContext),
392  .p.type = AVMEDIA_TYPE_VIDEO,
393  .p.id = AV_CODEC_ID_HEVC,
394  .init = qsv_enc_init,
396  .close = qsv_enc_close,
397  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
398  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
408  AV_PIX_FMT_NONE },
409  .p.priv_class = &class,
410  .defaults = qsv_enc_defaults,
411  .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
413  .p.wrapper_name = "qsv",
414  .hw_configs = ff_qsv_enc_hw_configs,
415 };
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
h2645_parse.h
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:947
GetByteContext
Definition: bytestream.h:33
QSV_OPTION_ADAPTIVE_B
#define QSV_OPTION_ADAPTIVE_B
Definition: qsvenc.h:100
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
LOAD_PLUGIN_NONE
@ LOAD_PLUGIN_NONE
Definition: qsvenc_hevc.c:43
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
QSV_OPTION_DBLK_IDC
#define QSV_OPTION_DBLK_IDC
Definition: qsvenc.h:109
AVOption
AVOption.
Definition: opt.h:346
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:197
FF_CODEC_CAP_NOT_INIT_THREADSAFE
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e.
Definition: codec_internal.h:34
FFCodec
Definition: codec_internal.h:127
AV_PIX_FMT_XV30
#define AV_PIX_FMT_XV30
Definition: pixfmt.h:534
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
QSVEncContext::load_plugins
char * load_plugins
Definition: qsvenc.h:262
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:107
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
QSV_OPTION_MAX_MIN_QP
#define QSV_OPTION_MAX_MIN_QP
Definition: qsvenc.h:115
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
QSVEncContext::hevc_vps
int hevc_vps
Definition: qsvenc.h:206
FFCodecDefault
Definition: codec_internal.h:97
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
GetBitContext
Definition: get_bits.h:108
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:296
AV_PIX_FMT_Y210
#define AV_PIX_FMT_Y210
Definition: pixfmt.h:532
lrint
#define lrint
Definition: tablegen.h:53
ff_qsv_enc_hw_configs
const AVCodecHWConfigInternal *const ff_qsv_enc_hw_configs[]
Definition: qsvenc.c:2689
ff_hevc_qsv_encoder
const FFCodec ff_hevc_qsv_encoder
Definition: qsvenc_hevc.c:388
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
QSVEncContext
Definition: qsvenc.h:156
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
H2645NAL::size
int size
Definition: h2645_parse.h:36
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
qsvenc.h
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:63
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
generate_fake_vps
static int generate_fake_vps(QSVEncContext *q, AVCodecContext *avctx)
Definition: qsvenc_hevc.c:54
get_bits.h
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:35
QSV_OPTION_MAX_SLICE_SIZE
#define QSV_OPTION_MAX_SLICE_SIZE
Definition: qsvenc.h:85
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
QSV_OPTION_RDO
#define QSV_OPTION_RDO
Definition: qsvenc.h:77
if
if(ret)
Definition: filter_design.txt:179
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
QSV_OPTION_LOW_DELAY_BRC
#define QSV_OPTION_LOW_DELAY_BRC
Definition: qsvenc.h:112
NULL
#define NULL
Definition: coverity.c:32
qsv.h
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
QSV_COMMON_OPTS
#define QSV_COMMON_OPTS
Definition: qsvenc.h:56
ff_hevc_parse_sps
int ff_hevc_parse_sps(HEVCSPS *sps, GetBitContext *gb, unsigned int *sps_id, int apply_defdispwin, const HEVCVPS *const *vps_list, AVCodecContext *avctx)
Parse the SPS from the bitstream into the provided HEVCSPS struct.
Definition: hevc_ps.c:872
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
Definition: frame.h:120
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
QSV_OPTION_ADAPTIVE_I
#define QSV_OPTION_ADAPTIVE_I
Definition: qsvenc.h:97
H2645RBSP::rbsp_buffer
uint8_t * rbsp_buffer
Definition: h2645_parse.h:75
PutByteContext
Definition: bytestream.h:37
ff_hevc_encode_nal_vps
int ff_hevc_encode_nal_vps(HEVCVPS *vps, unsigned int id, uint8_t *buf, int buf_size)
Definition: hevc_ps_enc.c:66
QSV_OPTION_SKIP_FRAME
#define QSV_OPTION_SKIP_FRAME
Definition: qsvenc.h:140
hevcdec.h
codec_internal.h
VE
#define VE
Definition: qsvenc_hevc.c:304
AV_PIX_FMT_P012
#define AV_PIX_FMT_P012
Definition: pixfmt.h:529
H2645NAL
Definition: h2645_parse.h:34
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
HEVC_MAX_SUB_LAYERS
@ HEVC_MAX_SUB_LAYERS
Definition: hevc.h:105
QSV_OPTION_SCENARIO
#define QSV_OPTION_SCENARIO
Definition: qsvenc.h:123
LOAD_PLUGIN_HEVC_SW
@ LOAD_PLUGIN_HEVC_SW
Definition: qsvenc_hevc.c:44
qsv_enc_init
static av_cold int qsv_enc_init(AVCodecContext *avctx)
Definition: qsvenc_hevc.c:242
QSV_OPTION_EXTBRC
#define QSV_OPTION_EXTBRC
Definition: qsvenc.h:94
AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
Definition: frame.h:137
ff_qsv_enc_close
int ff_qsv_enc_close(AVCodecContext *avctx, QSVEncContext *q)
Definition: qsvenc.c:2641
QSVEncContext::set_encode_ctrl_cb
SetEncodeCtrlCB * set_encode_ctrl_cb
Definition: qsvenc.h:263
QSVHEVCEncContext
Definition: qsvenc_hevc.c:48
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
ff_h2645_extract_rbsp
int ff_h2645_extract_rbsp(const uint8_t *src, int length, H2645RBSP *rbsp, H2645NAL *nal, int small_padding)
Extract the raw (unescaped) bitstream.
Definition: h2645_parse.c:35
common.h
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_X2RGB10
#define AV_PIX_FMT_X2RGB10
Definition: pixfmt.h:536
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
QSVHEVCEncContext::load_plugin
int load_plugin
Definition: qsvenc_hevc.c:51
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
options
static const AVOption options[]
Definition: qsvenc_hevc.c:305
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
qsv_hevc_set_encode_ctrl
static int qsv_hevc_set_encode_ctrl(AVCodecContext *avctx, const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
Definition: qsvenc_hevc.c:165
hevc.h
H2645RBSP::rbsp_buffer_alloc_size
int rbsp_buffer_alloc_size
Definition: h2645_parse.h:77
avcodec.h
HEVC_NAL_VPS
@ HEVC_NAL_VPS
Definition: hevc.h:61
ret
ret
Definition: filter_design.txt:187
QSV_OPTION_P_STRATEGY
#define QSV_OPTION_P_STRATEGY
Definition: qsvenc.h:103
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
LoadPlugin
LoadPlugin
Definition: qsvdec.c:968
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
QSV_OPTION_MAX_FRAME_SIZE
#define QSV_OPTION_MAX_FRAME_SIZE
Definition: qsvenc.h:80
LOAD_PLUGIN_HEVC_HW
@ LOAD_PLUGIN_HEVC_HW
Definition: qsvenc_hevc.c:45
QSVHEVCEncContext::qsv
QSVEncContext qsv
Definition: qsvenc_hevc.c:50
AVCodecContext
main external API structure.
Definition: avcodec.h:445
QSVEncContext::idr_interval
int idr_interval
Definition: qsvenc.h:210
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
QSVEncContext::ver
mfxVersion ver
Definition: qsvenc.h:204
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:528
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
HEVC_NAL_SPS
@ HEVC_NAL_SPS
Definition: hevc.h:62
QSV_OPTION_AVBR
#define QSV_OPTION_AVBR
Definition: qsvenc.h:136
mem.h
ff_qsv_enc_init
int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
Definition: qsvenc.c:1631
HEVCVPS
Definition: hevc_ps.h:154
mastering_display_metadata.h
AV_CODEC_CAP_HYBRID
#define AV_CODEC_CAP_HYBRID
Codec is potentially backed by a hardware implementation, but not necessarily.
Definition: codec.h:152
HEVCSPS
Definition: hevc_ps.h:188
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
AVPacket
This structure stores compressed data.
Definition: packet.h:501
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:116
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
bytestream.h
qsv_enc_defaults
static const FFCodecDefault qsv_enc_defaults[]
Definition: qsvenc_hevc.c:377
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
qsv_enc_frame
static int qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: qsvenc_hevc.c:288
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
OFFSET
#define OFFSET(x)
Definition: qsvenc_hevc.c:303
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:406
H2645RBSP
Definition: h2645_parse.h:74
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
ff_qsv_encode
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q, AVPacket *pkt, const AVFrame *frame, int *got_packet)
Definition: qsvenc.c:2573
qsv_enc_close
static av_cold int qsv_enc_close(AVCodecContext *avctx)
Definition: qsvenc_hevc.c:296
QSV_OPTION_MBBRC
#define QSV_OPTION_MBBRC
Definition: qsvenc.h:91
QSV_OPTION_B_STRATEGY
#define QSV_OPTION_B_STRATEGY
Definition: qsvenc.h:106