/*
 * Intel MediaSDK QSV based HEVC encoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <sys/types.h>

#include <mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/mastering_display_metadata.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "h2645_parse.h"
#include "qsv.h"
#include "qsvenc.h"

#include "hevc/hevc.h"
#include "hevc/ps.h"

enum LoadPlugin {
    LOAD_PLUGIN_NONE,
    LOAD_PLUGIN_HEVC_SW,
    LOAD_PLUGIN_HEVC_HW,
};

typedef struct QSVHEVCEncContext {
    AVClass *class;
    QSVEncContext qsv;
    int load_plugin;
} QSVHEVCEncContext;

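/*
 * The extradata returned by libmfx carries the parameter sets the runtime
 * produced (the first NAL after the 4-byte startcode is parsed as an SPS
 * below), but no VPS. Synthesize a minimal VPS from the parsed SPS fields
 * and prepend it to avctx->extradata so the stream headers are complete.
 */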
static int generate_fake_vps(QSVEncContext *q, AVCodecContext *avctx)
{
    GetByteContext gbc;
    PutByteContext pbc;

    GetBitContext gb;
    H2645RBSP sps_rbsp = { NULL };
    H2645NAL sps_nal = { NULL };
    HEVCSPS sps = { 0 };
    HEVCVPS vps = { 0 };
    uint8_t vps_buf[128], vps_rbsp_buf[128];
    uint8_t *new_extradata;
    unsigned int sps_id;
    int ret, i, type, vps_size;

    if (!avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx\n");
        return AVERROR_UNKNOWN;
    }

    av_fast_padded_malloc(&sps_rbsp.rbsp_buffer, &sps_rbsp.rbsp_buffer_alloc_size, avctx->extradata_size);
    if (!sps_rbsp.rbsp_buffer)
        return AVERROR(ENOMEM);

    /* parse the SPS */
    ret = ff_h2645_extract_rbsp(avctx->extradata + 4, avctx->extradata_size - 4, &sps_rbsp, &sps_nal, 1);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error unescaping the SPS buffer\n");
        return ret;
    }

    ret = init_get_bits8(&gb, sps_nal.data, sps_nal.size);
    if (ret < 0) {
        av_freep(&sps_rbsp.rbsp_buffer);
        return ret;
    }

    get_bits(&gb, 1);        // forbidden_zero_bit
    type = get_bits(&gb, 6); // nal_unit_type
    if (type != HEVC_NAL_SPS) {
        av_log(avctx, AV_LOG_ERROR, "Unexpected NAL type in the extradata: %d\n",
               type);
        av_freep(&sps_rbsp.rbsp_buffer);
        return AVERROR_INVALIDDATA;
    }
    get_bits(&gb, 9);        // nuh_layer_id + nuh_temporal_id_plus1

    ret = ff_hevc_parse_sps(&sps, &gb, &sps_id, 0, 0, NULL, avctx);
    av_freep(&sps_rbsp.rbsp_buffer);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error parsing the SPS\n");
        return ret;
    }

    /* generate the VPS */
    vps.vps_max_layers                           = 1;
    vps.vps_max_sub_layers                       = sps.max_sub_layers;
    vps.vps_temporal_id_nesting_flag             = sps.temporal_id_nesting;
    memcpy(&vps.ptl, &sps.ptl, sizeof(vps.ptl));
    vps.vps_sub_layer_ordering_info_present_flag = 1;
    for (i = 0; i < HEVC_MAX_SUB_LAYERS; i++) {
        vps.vps_max_dec_pic_buffering[i] = sps.temporal_layer[i].max_dec_pic_buffering;
        vps.vps_num_reorder_pics[i]      = sps.temporal_layer[i].num_reorder_pics;
        vps.vps_max_latency_increase[i]  = sps.temporal_layer[i].max_latency_increase;
    }

    vps.vps_num_layer_sets                  = 1;
    vps.vps_timing_info_present_flag        = sps.vui.vui_timing_info_present_flag;
    vps.vps_num_units_in_tick               = sps.vui.vui_num_units_in_tick;
    vps.vps_time_scale                      = sps.vui.vui_time_scale;
    vps.vps_poc_proportional_to_timing_flag = sps.vui.vui_poc_proportional_to_timing_flag;
    vps.vps_num_ticks_poc_diff_one          = sps.vui.vui_num_ticks_poc_diff_one_minus1 + 1;
    vps.vps_num_hrd_parameters              = 0;

    /* generate the encoded RBSP form of the VPS */
    ret = ff_hevc_encode_nal_vps(&vps, sps.vps_id, vps_rbsp_buf, sizeof(vps_rbsp_buf));
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error writing the VPS\n");
        return ret;
    }

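    /*
     * The RBSP written by ff_hevc_encode_nal_vps() still needs Annex B
     * framing: a 00 00 00 01 startcode, the two-byte NAL unit header, and
     * emulation prevention. Whenever the next three RBSP bytes would read as
     * 0x000000..0x000003, the loop below emits 0x00 0x00 0x03 and leaves the
     * third source byte for the following iteration, which is the
     * emulation_prevention_three_byte insertion the HEVC spec requires.
     */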
    /* escape and add the startcode */
    bytestream2_init(&gbc, vps_rbsp_buf, ret);
    bytestream2_init_writer(&pbc, vps_buf, sizeof(vps_buf));

    bytestream2_put_be32(&pbc, 1);                 // startcode
    bytestream2_put_byte(&pbc, HEVC_NAL_VPS << 1); // NAL
    bytestream2_put_byte(&pbc, 1);                 // header

    while (bytestream2_get_bytes_left(&gbc)) {
        if (bytestream2_get_bytes_left(&gbc) >= 3 && bytestream2_peek_be24(&gbc) <= 3) {
            bytestream2_put_be24(&pbc, 3);
            bytestream2_skip(&gbc, 2);
        } else
            bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
    }

    vps_size = bytestream2_tell_p(&pbc);
    new_extradata = av_mallocz(vps_size + avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!new_extradata)
        return AVERROR(ENOMEM);
    memcpy(new_extradata, vps_buf, vps_size);
    memcpy(new_extradata + vps_size, avctx->extradata, avctx->extradata_size);

    av_freep(&avctx->extradata);
    avctx->extradata       = new_extradata;
    avctx->extradata_size += vps_size;

    return 0;
}

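/*
 * Per-frame encode control callback: when the libmfx runtime is new enough
 * (API 1.25+), translate the AVFrame HDR side data into the corresponding
 * mfx extension buffers so the encoder emits mastering display colour volume
 * and content light level SEI messages.
 */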
static int qsv_hevc_set_encode_ctrl(AVCodecContext *avctx,
                                    const AVFrame *frame, mfxEncodeCtrl *enc_ctrl)
{
    QSVHEVCEncContext *q = avctx->priv_data;
    AVFrameSideData *sd;

    if (!frame || !QSV_RUNTIME_VERSION_ATLEAST(q->qsv.ver, 1, 25))
        return 0;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
    if (sd) {
        AVMasteringDisplayMetadata *mdm = (AVMasteringDisplayMetadata *)sd->data;

        // SEI is needed when both the primaries and luminance are set
        if (mdm->has_primaries && mdm->has_luminance) {
            const int mapping[3] = {1, 2, 0};
            const int chroma_den = 50000;
            const int luma_den = 10000;
            int i;
            mfxExtMasteringDisplayColourVolume *mdcv = av_mallocz(sizeof(mfxExtMasteringDisplayColourVolume));

            if (!mdcv)
                return AVERROR(ENOMEM);

            mdcv->Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
            mdcv->Header.BufferSz = sizeof(*mdcv);

            for (i = 0; i < 3; i++) {
                const int j = mapping[i];

                mdcv->DisplayPrimariesX[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(mdm->display_primaries[j][0])),
                          chroma_den);
                mdcv->DisplayPrimariesY[i] =
                    FFMIN(lrint(chroma_den *
                                av_q2d(mdm->display_primaries[j][1])),
                          chroma_den);
            }

            mdcv->WhitePointX =
                FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[0])),
                      chroma_den);
            mdcv->WhitePointY =
                FFMIN(lrint(chroma_den * av_q2d(mdm->white_point[1])),
                      chroma_den);

            mdcv->MaxDisplayMasteringLuminance =
                lrint(luma_den * av_q2d(mdm->max_luminance));
            mdcv->MinDisplayMasteringLuminance =
                FFMIN(lrint(luma_den * av_q2d(mdm->min_luminance)),
                      mdcv->MaxDisplayMasteringLuminance);

            enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)mdcv;
        }
    }

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    if (sd) {
        AVContentLightMetadata *clm = (AVContentLightMetadata *)sd->data;
        mfxExtContentLightLevelInfo *clli = av_mallocz(sizeof(mfxExtContentLightLevelInfo));

        if (!clli)
            return AVERROR(ENOMEM);

        clli->Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
        clli->Header.BufferSz = sizeof(*clli);

        clli->MaxContentLightLevel    = FFMIN(clm->MaxCLL, 65535);
        clli->MaxPicAverageLightLevel = FFMIN(clm->MaxFALL, 65535);

        enc_ctrl->ExtParam[enc_ctrl->NumExtParam++] = (mfxExtBuffer *)clli;
    }

    return 0;
}

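/*
 * Encoder init: pick the MediaSDK HEVC plugin UID to load (unless
 * load_plugins was set explicitly), adjust idr_interval to the common
 * convention, install the HDR SEI callback above, and synthesize a VPS when
 * the runtime did not provide one in the extradata.
 */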
static av_cold int qsv_enc_init(AVCodecContext *avctx)
{
    QSVHEVCEncContext *q = avctx->priv_data;
    int ret;

    if (q->load_plugin != LOAD_PLUGIN_NONE) {
        static const char * const uid_hevcenc_sw = "2fca99749fdb49aeb121a5b63ef568f7";
        static const char * const uid_hevcenc_hw = "6fadc791a0c2eb479ab6dcd5ea9da347";

        if (q->qsv.load_plugins[0]) {
            av_log(avctx, AV_LOG_WARNING,
                   "load_plugins is not empty, but load_plugin is not set to "
                   "'none'. The load_plugin value will be ignored.\n");
        } else {
            av_freep(&q->qsv.load_plugins);

            if (q->load_plugin == LOAD_PLUGIN_HEVC_SW)
                q->qsv.load_plugins = av_strdup(uid_hevcenc_sw);
            else
                q->qsv.load_plugins = av_strdup(uid_hevcenc_hw);

            if (!q->qsv.load_plugins)
                return AVERROR(ENOMEM);
        }
    }

    // HEVC and H264 meaning of the value is shifted by 1, make it consistent
    q->qsv.idr_interval++;

    q->qsv.set_encode_ctrl_cb = qsv_hevc_set_encode_ctrl;

    ret = ff_qsv_enc_init(avctx, &q->qsv);
    if (ret < 0)
        return ret;

    if (!q->qsv.hevc_vps) {
        ret = generate_fake_vps(&q->qsv, avctx);
        if (ret < 0) {
            ff_qsv_enc_close(avctx, &q->qsv);
            return ret;
        }
    }

    return 0;
}

static int qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *frame, int *got_packet)
{
    QSVHEVCEncContext *q = avctx->priv_data;

    return ff_qsv_encode(avctx, &q->qsv, pkt, frame, got_packet);
}

static av_cold int qsv_enc_close(AVCodecContext *avctx)
{
    QSVHEVCEncContext *q = avctx->priv_data;

    return ff_qsv_enc_close(avctx, &q->qsv);
}

#define OFFSET(x) offsetof(QSVHEVCEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
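
/*
 * AVOption table helpers: OFFSET() locates an option's backing field inside
 * QSVHEVCEncContext, and VE flags each entry as a video encoding parameter.
 * The table below combines the shared QSV option macros with the
 * HEVC-specific options of this wrapper.
 */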
static const AVOption options[] = {
    QSV_COMMON_OPTS
    QSV_OPTION_RDO
    QSV_OPTION_MAX_FRAME_SIZE
    QSV_OPTION_MAX_SLICE_SIZE
    QSV_OPTION_MBBRC
    QSV_OPTION_EXTBRC
    QSV_OPTION_ADAPTIVE_I
    QSV_OPTION_ADAPTIVE_B
    QSV_OPTION_P_STRATEGY
    QSV_OPTION_B_STRATEGY
    QSV_OPTION_DBLK_IDC
    QSV_OPTION_LOW_DELAY_BRC
    QSV_OPTION_MAX_MIN_QP
    QSV_OPTION_SCENARIO
    QSV_OPTION_AVBR
    QSV_OPTION_SKIP_FRAME
#if QSV_HAVE_HE
    QSV_HE_OPTIONS
#endif

    { "idr_interval", "Distance (in I-frames) between IDR frames", OFFSET(qsv.idr_interval), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT_MAX, VE, .unit = "idr_interval" },
    { "begin_only", "Output an IDR-frame only at the beginning of the stream", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, 0, 0, VE, .unit = "idr_interval" },
    { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VE, .unit = "load_plugin" },
    { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VE, .unit = "load_plugin" },
    { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VE, .unit = "load_plugin" },
    { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VE, .unit = "load_plugin" },

    { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
        OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VE },

    { "look_ahead_depth", "Depth of look ahead in number of frames, available when extbrc option is enabled", OFFSET(qsv.look_ahead_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, VE },
    { "profile", NULL, OFFSET(qsv.profile), AV_OPT_TYPE_INT, { .i64 = MFX_PROFILE_UNKNOWN }, 0, INT_MAX, VE, .unit = "profile" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
    { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN }, INT_MIN, INT_MAX, VE, .unit = "profile" },
    { "main10", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAIN10 }, INT_MIN, INT_MAX, VE, .unit = "profile" },
    { "mainsp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_MAINSP }, INT_MIN, INT_MAX, VE, .unit = "profile" },
    { "rext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_REXT }, INT_MIN, INT_MAX, VE, .unit = "profile" },
#if QSV_VERSION_ATLEAST(1, 32)
    { "scc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_PROFILE_HEVC_SCC }, INT_MIN, INT_MAX, VE, .unit = "profile" },
#endif
    { "tier", "Set the encoding tier (only level >= 4 can support high tier)", OFFSET(qsv.tier), AV_OPT_TYPE_INT, { .i64 = MFX_TIER_HEVC_HIGH }, MFX_TIER_HEVC_MAIN, MFX_TIER_HEVC_HIGH, VE, .unit = "tier" },
    { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_MAIN }, INT_MIN, INT_MAX, VE, .unit = "tier" },
    { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_TIER_HEVC_HIGH }, INT_MIN, INT_MAX, VE, .unit = "tier" },

    { "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },

    { "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
    { "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
    { "recovery_point_sei", "Insert recovery point SEI messages", OFFSET(qsv.recovery_point_sei), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
    { "aud", "Insert the Access Unit Delimiter NAL", OFFSET(qsv.aud), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "pic_timing_sei", "Insert picture timing SEI with pic_struct_syntax element", OFFSET(qsv.pic_timing_sei), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
    { "transform_skip", "Turn this option ON to enable transformskip", OFFSET(qsv.transform_skip), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
    { "int_ref_type", "Intra refresh type. B frames should be set to 0", OFFSET(qsv.int_ref_type), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE, .unit = "int_ref_type" },
    { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags = VE, .unit = "int_ref_type" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags = VE, .unit = "int_ref_type" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, .flags = VE, .unit = "int_ref_type" },
    { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, .flags = VE, .unit = "int_ref_type" },
    { "int_ref_cycle_size", "Number of frames in the intra refresh cycle", OFFSET(qsv.int_ref_cycle_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT16_MAX, VE },
    { "int_ref_qp_delta", "QP difference for the refresh MBs", OFFSET(qsv.int_ref_qp_delta), AV_OPT_TYPE_INT, { .i64 = INT16_MIN }, INT16_MIN, INT16_MAX, VE },
    { "int_ref_cycle_dist", "Distance between the beginnings of the intra-refresh cycles in frames", OFFSET(qsv.int_ref_cycle_dist), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT16_MAX, VE },

    { NULL },
};

static const AVClass class = {
    .class_name = "hevc_qsv encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const FFCodecDefault qsv_enc_defaults[] = {
    { "b",       "0" },
    { "refs",    "0" },
    { "g",       "248" },
    { "bf",      "-1" },
    { "qmin",    "-1" },
    { "qmax",    "-1" },
    { "trellis", "-1" },
    { NULL },
};

const FFCodec ff_hevc_qsv_encoder = {
    .p.name         = "hevc_qsv",
    CODEC_LONG_NAME("HEVC (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVHEVCEncContext),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_HEVC,
    .init           = qsv_enc_init,
    FF_CODEC_ENCODE_CB(qsv_enc_frame),
    .close          = qsv_enc_close,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
    .p.pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
                                                    AV_PIX_FMT_P010,
                                                    AV_PIX_FMT_P012,
                                                    AV_PIX_FMT_YUYV422,
                                                    AV_PIX_FMT_Y210,
                                                    AV_PIX_FMT_QSV,
                                                    AV_PIX_FMT_BGRA,
                                                    AV_PIX_FMT_X2RGB10,
                                                    AV_PIX_FMT_VUYX,
                                                    AV_PIX_FMT_XV30,
                                                    AV_PIX_FMT_NONE },
    .color_ranges   = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG,
    .p.priv_class   = &class,
    .defaults       = qsv_enc_defaults,
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .p.wrapper_name = "qsv",
    .hw_configs     = ff_qsv_enc_hw_configs,
};
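
/*
 * Illustrative usage sketch (not part of this file): the private options
 * declared above are reachable from the ffmpeg command line through the
 * codec's private class, for example
 *
 *     ffmpeg -i input.mp4 -c:v hevc_qsv -profile:v main10 -tier high out.mp4
 *
 * assuming an FFmpeg build with QSV support and Intel hardware capable of
 * HEVC encoding.
 */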