[FFmpeg-devel] Added HW H.264 and HEVC encoding for AMD GPUs based on AMF SDK

Mark Thompson sw at jkqxz.net
Tue Oct 31 00:35:16 EET 2017


On 30/10/17 21:30, Mironov, Mikhail wrote:
>>> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
>>> +    const wchar_t *scope, const wchar_t *message)
>>> +{
>>> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
>>> +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
>>
>> Does the message necessarily include a newline already?
> 
> Yes.
> 
>>> +    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
>>> +    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
>>
>> I think do s/ \n/\n/ for all of these messages.
> 
> Sorry, didn't get this.

Most of your messages end with a space before the newline; the space probably shouldn't be there.
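
That is, for example:

    "DLL %s failed to find function %s. \n"

should become

    "DLL %s failed to find function %s.\n"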

>>> +
>>> +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
>>> +    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
>>> +
>>> +    res = version_fun(&ctx->version);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
>>> +    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_INIT_FUNCTION_NAME, res);
>>> +    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d. \n", res);
>>> +    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d. \n", res);
>>> +    return 0;
>>> +}
>>> +
>>> +static int amf_init_context(AVCodecContext *avctx)
>>> +{
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +
>>> +    // the return of these functions indicates old state and do not affect behaviour
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
>>> +#if AMF_DEBUG_TRACE
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 1);
>>> +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
>>> +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
>>> +#else
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 0);
>>> +#endif
>>
>> I don't much like this compile-time option.  What sort of messages does the
>> trace writer actually give you?  Will a user ever want to enable it?
> 
> Two points:
> 1. There is extensive AMF logging that can help diagnose a problem. Do we want to have it all the time in AV_LOG_DEBUG?
> 2. AMF can trace to the debug output, and this is useful, but for normal ffmpeg operation it is under #ifdef.

Help who diagnose a problem?  Either it is useful to a user, in which case put it behind a real option, or it isn't, in which case don't include it at all.  A compile-time option just encourages bitrot on whichever side is not default.
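
For example, something along these lines (untested sketch; the option name "log_to_dbg" and the corresponding context field are just placeholders, not part of your patch):

    // in the options[] table:
    { "log_to_dbg", "Enable AMF trace logging to the debug output", OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    // in amf_init_context(), replacing the #if AMF_DEBUG_TRACE block:
    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, ctx->log_to_dbg);
    if (ctx->log_to_dbg)
        ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);

Then the extra logging is always available to whoever needs it, without rebuilding.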

>>> +
>>> +static GUID  AMFTextureArrayIndexGUID = AMFTextureArrayIndexGUIDDef;
>>
>> GUID is a Windows type, should this be AMFGuid?  (I tried removing the
>> check and compiling on Linux, other than the D3D11 stuff this is the only
>> error.)
>>
> 
> This is a Windows type and is used with the Windows interface ID3D11Texture2D.
> When Linux support is added, all of this section will be under #ifdef.

It might be cleaner to put it inside the function (see below).  Also, it should be const.
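
E.g. something like this, keeping your AMFTextureArrayIndexGUIDDef initializer (assuming it expands to a constant initializer):

    // local to ff_amf_encode_frame(), on the D3D11 path only
    static const GUID AMFTextureArrayIndexGUID = AMFTextureArrayIndexGUIDDef;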

>>> +
>>> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>>> +                        const AVFrame *frame, int *got_packet)
>>> +{
>>> +    int             ret = 0;
>>> +    AMF_RESULT      res = AMF_OK;
>>> +    AmfContext     *ctx = avctx->priv_data;
>>> +    AMFSurface     *surface = NULL;
>>> +    AMFData        *data = NULL;
>>> +    amf_bool       submitted = 0;
>>> +
>>> +    while (!submitted) {
>>> +        if (!frame) { // submit drain
>>> +            if (!ctx->eof) { // submit drain one time only
>>> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
>>> +                if (res == AMF_INPUT_FULL) {
>>> +                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again
>>> +                                     // need to get some output and try again
>>> +                } else if (res == AMF_OK) {
>>> +                    ctx->eof = 1; // drain started
>>> +                    submitted = 1;
>>> +                }
>>> +            }
>>> +        } else { // submit frame
>>> +            if (surface == NULL) { // prepare surface from frame one time only
>>> +                if (frame->hw_frames_ctx && ( // HW frame detected
>>> +                                              // check if the same hw_frames_ctx as used in initialization
>>> +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
>>> +                    // check if the same hw_device_ctx as used in initialization
>>> +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
>>> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
>>> +                )) {

(Here.)

>>> +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
>>> +                    int index = (int)(size_t)frame->data[1]; // index is a slice in the texture array - set to tell AMF which slice to use
>>
>> (int)(intptr_t)frame->data[1];
>>
>>> +                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
>>> +
>>> +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
>>> +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decode surfaces are vertically aligned by 16 tell AMF real size
>>
>> "decode surfaces"?  These need not come from a decoder.  Does it work with
>> hwupload?
>>
>>> +                    surface->pVtbl->SetPts(surface, frame->pts);
>>> +                } else {
>>> +                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
>>> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed  with error %d \n", res);
>>> +                    amf_copy_surface(avctx, frame, surface);
>>> +                }
>>> +            }
>>> +            // encode
>>> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
>>> +            if (res == AMF_INPUT_FULL) { // handle full queue
>>> +                av_usleep(1000); // input queue is full: wait, poll and submit surface again
>>> +            } else {
>>> +                surface->pVtbl->Release(surface);
>>> +                surface = NULL;
>>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);
>>> +                submitted = 1;
>>> +            }
>>> +        }
>>> +        // poll results
>>> +        if (!data) {
>>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
>>> +            if (data) {
>>> +                AMFBuffer* buffer;
>>> +                AMFGuid guid = IID_AMFBuffer();
>>> +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
>>> +                ret = amf_copy_buffer(avctx, pkt, buffer);
>>> +                if (!ret)
>>> +                    *got_packet = 1;
>>> +                buffer->pVtbl->Release(buffer);
>>> +                data->pVtbl->Release(data);
>>> +                if (ctx->eof) {
>>> +                    submitted = 1; // we are in the drain state - no submissions
>>> +                }
>>> +            } else if (res == AMF_EOF) {
>>> +                submitted = 1; // drain complete
>>> +            } else {
>>> +                if (!submitted) {
>>> +                    av_usleep(1000); // wait and poll again
>>> +                }
>>> +            }
>>> +        }
>>> +    }
>>> +    return ret;
>>> +}
>>
>> I still think this would be much better off using the
>> send_frame()/receive_packet() API.  Even if your API doesn't expose any
>> information about the queue length, you only need to hold a single input
>> frame transiently to get around that (the user is not allowed to call
>> send_frame() twice in a row without calling receive_packet()).
>>
> 
> So to implement this I would have to:
> - in the send_frame() if AMF_INPUT_FULL is returned - store input frame (or copy?)
> - In the next receive_frame() check if frame is stored
> - Wait till some output is produced
> - resubmit stored frame

Sounds about right.

> Issues I see:
> - Doesn't this logic defeat the purpose of independent send()/receive()?
> - How can I report an error if receive() produced a compressed frame but the delayed submission failed?

Since this is asynchronous anyway, just report it at the next available opportunity.

> - This logic depends on the particular logic in the calling code.

The API requires this behaviour of the caller.  See the documentation in avcodec.h.

> - This logic depends on the particular HW behaviour. 

How so?

> - In the future, we would like to output individual slices of a compressed frame. 
> When this is added, receive_frame() must be called several times to clear space in the HW queue. 
> Granted, the current implementation also does not cover this case, but a truly independent 
> send/receive implementation would. 

Note that the user is required to call receive_packet() repeatedly until it returns EAGAIN, and only then are they allowed to call send_frame() again.
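
To make the shape of this concrete, here is a rough sketch of what I have in mind (untested; "delayed_frame" would be a new field in AmfContext, and amf_submit_frame()/amf_query_output() stand in for the surface-submission and QueryOutput()/amf_copy_buffer() logic you already have in ff_amf_encode_frame()):

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        AmfContext *ctx = avctx->priv_data;
        int ret;

        // NB: drain handling (frame == NULL) omitted for brevity.

        if (ctx->delayed_frame) // previous frame not yet accepted by the HW queue
            return AVERROR(EAGAIN);

        ret = amf_submit_frame(avctx, frame); // wraps SubmitInput(), maps AMF_INPUT_FULL to EAGAIN
        if (ret == AVERROR(EAGAIN)) {
            // queue full: keep a reference instead of sleeping in a loop
            ctx->delayed_frame = av_frame_clone(frame);
            if (!ctx->delayed_frame)
                return AVERROR(ENOMEM);
            ret = 0;
        }
        return ret;
    }

    static int amf_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
    {
        AmfContext *ctx = avctx->priv_data;
        int ret;

        // retry the deferred submission first; space appears as output is drained
        if (ctx->delayed_frame) {
            ret = amf_submit_frame(avctx, ctx->delayed_frame);
            if (ret != AVERROR(EAGAIN)) {
                av_frame_free(&ctx->delayed_frame);
                if (ret < 0)
                    return ret; // delayed failure gets reported here
            }
        }

        return amf_query_output(avctx, pkt); // QueryOutput() + amf_copy_buffer(), EAGAIN when nothing is ready
    }

Since the caller has to keep calling receive_packet() until it returns EAGAIN before sending the next frame, holding a single stored frame is enough.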

>>> +static const AVOption options[] = {
>>> +    // Static
>>> +    /// Usage
>>> +    { "usage",          "Encoder Usage",        OFFSET(usage),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
>>> +    { "transcoding",    "Generic Transcoding",  0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
>>> +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" },
>>> +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
>>> +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
>>> +
>>> +    /// Profile,
>>> +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN     }, AMF_VIDEO_ENCODER_PROFILE_BASELINE, AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
>>> +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },
>>
>> You still don't support baseline profile.
> 
> Talked to codec folks. Currently this is really baseline by mistake. The intention was to expose only 
> "constrained baseline". They want to correct this, but it should go into the driver first and then be 
> reflected in the AMF API. Once that is done, this entry will be updated.

Ok, so baseline profile will not be included at all, and then constrained baseline added later?  That sounds fine.

>>> +    /// Maximum Access Unit Size
>>> +    { "max_au_size",    "Maximum Access Unit Size for rate control (in bits)", OFFSET(max_au_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },
>>
>> Did you check whether this really means the maximum access unit size?  If
>> yes, what is the use-case for that?
>>
> 
> I've changed the description. This parameter is used in rate control to limit AU size. 
> It is useful for streaming.

When do you want to explicitly set an access unit size limit?  That sort of thing is better limited by setting maxrate with a shorter window, IMO.

>>> +    { "me_half_pel",    "Enable ME Half Pixel",     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
>>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ", OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
>>
>> What is the use-case for these options?
>>
> 
> These are options for motion estimator precision. The spelling is corrected to "me_quarter_pel".

What I mean is, why would anyone ever set these options to zero?

>>> +
>>> +    { NULL }
>>> +};
>>> +
>>> +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
>>> +{
>>> +    int                 ret = 0;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMFVariantStruct    var = {0};
>>> +    amf_int64           profile = 0;
>>> +    amf_int64           profile_level = 0;
>>> +    AMFBuffer          *buffer;
>>> +    AMFGuid             guid;
>>> +
>>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
>>> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
>>
>> avctx->framerate should be set if the input is CFR, use that first.
>>
>>> +
>>> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
>>> +
>>> +    if ((ret = ff_amf_encode_init(avctx)) != 0)
>>> +        return ret;
>>> +
>>> +    // Static parameters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
>>> +
>>> +    profile = avctx->profile;
>>
>> avctx->profile might be (is by default, even) FF_PROFILE_UNKNOWN, which is
>> not zero.
>>
>>> +    if (profile == 0) {
>>> +        profile = ctx->profile;
>>> +    }
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, profile);
>>> +
>>> +    profile_level = avctx->level;
>>
>> Similarly FF_LEVEL_UNKNOWN.
>>
>>> +    if (profile_level == 0) {
>>> +        profile_level = ctx->level;
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);
>>> +
>>> +    // Maximum Reference Frames
>>> +    if (avctx->refs != -1) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
>>> +    }
>>> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
>>> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
>>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
>>> +    }
>>> +
>>> +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
>>> +    if (avctx->color_range == AVCOL_RANGE_JPEG) {
>>> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
>>> +    }
>>> +
>>> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, AMF_VIDEO_ENCODER_PREENCODE_DISABLED);
>>> +        if (ctx->preanalysis)
>>> +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by cqp Rate Control Method, automatically disabled. \n");
>>> +    } else {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
>>> +    }
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
>>> +
>>> +    // Initialize Encoder
>>> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d \n", res);
>>> +
>>> +    // Dynamic parameters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx->rate_control_mode);
>>> +
>>> +    /// VBV Buffer
>>> +    if (avctx->rc_buffer_size != 0)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
>>> +    if (avctx->rc_initial_buffer_occupancy != 0) {
>>> +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
>>> +        if (amf_buffer_fullness > 64)
>>> +            amf_buffer_fullness = 64;
>>
>> I still don't understand what this is trying to do.
>>
>> rc_initial_buffer_occupancy is necessarily at most rc_buffer_size, so the
>> calculation will always get a number >= 64, so you always pass 64.
>>
>> What are the units of
>> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS meant to be?
>>
> 
> They are meant to be an abstract value from 0 to 64, where 64 means 100%. Don't ask me why ☹ 
> The calculation should be the opposite. My fault. 

Right, that makes more sense.
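
So presumably something like:

    int amf_buffer_fullness = avctx->rc_initial_buffer_occupancy * 64 / avctx->rc_buffer_size;

(with the usual check that rc_buffer_size is nonzero; the clamp to 64 then becomes redundant, since rc_initial_buffer_occupancy is at most rc_buffer_size.)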

>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
>>> +    }
>>> +    /// Maximum Access Unit Size
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
>>> +
>>> +    // QP Minimum / Maximum
>>> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, 0);
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, 51);
>>> +    } else {
>>> +        if (avctx->qmin != -1) {
>>> +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
>>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, qval);
>>> +        }
>>> +        if (avctx->qmax != -1) {
>>> +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
>>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, qval);
>>> +        }
>>> +    }
>>> +    // QP Values
>>> +    if (ctx->qp_i != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
>>> +    if (ctx->qp_p != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
>>> +    if (ctx->qp_b != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
>>> +
>>> +    // Bitrate
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
>>> +
>>> +    // Peak (max) bitrate. If not set make it out of bit_rate for best results.
>>> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
>>> +    } else {
>>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
>>
>> Please calculate a real value here as suggested in the previous comments
>> rather than using 13/10.
> 
> The suggestion was to set rc_max_rate to infinity. This will produce unpredictable results. 
> I can set it to bit_rate, but the quality will not be good. Another option would be to generate an error.
> I am open to suggestions.

Is the window over which rc_max_rate applies defined anywhere?  If so, then if rc_buffer_size is set you can calculate rc_max_rate as (rc_buffer_size + bit_rate * window) / window (i.e. the maximum number of bits which could be included in one window region given the buffer constraints).

If rc_buffer_size isn't set either, then it isn't meant to be constrained - the average should be right over the long term, but locally it doesn't matter.  Hence infinity (or at least some very large value), to not impose any constraint.
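
(With made-up numbers for concreteness: bit_rate = 4M, rc_buffer_size = 2M and a one-second window would give (2M + 4M * 1) / 1 = 6M as the peak rate, which at least has a justification behind it, unlike the 13/10 factor.)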

>>> +    { "max_au_size",    "Max AU Size in bits",                      OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL },
>>
>> Same question as in H.264.  Also other stuff below.
>>
>>> +    { "min_qp_i",       "min quantization parameter for I-frame",   OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "max_qp_i",       "max quantization parameter for I-frame",   OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "min_qp_p",       "min quantization parameter for P-frame",   OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "max_qp_p",       "max quantization parameter for P-frame",   OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "skip_frame",     "Rate Control Based Frame Skip",            OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
>>> +    { "me_half_pel",    "Enable ME Half Pixel",                     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
>>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",                 OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
>>> +
>>> +    { NULL }
>>> +};
>>> +
>>> +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
>>> +{
>>> +    int                 ret = 0;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMFVariantStruct    var = {0};
>>> +    amf_int64           profile = 0;
>>> +    amf_int64           profile_level = 0;
>>> +    AMFBuffer          *buffer;
>>> +    AMFGuid             guid;
>>> +
>>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
>>> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
>>> +
>>> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
>>> +
>>> +    if ((ret = ff_amf_encode_init(avctx)) < 0)
>>> +        return ret;
>>> +
>>> +    // init static parameters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
>>> +
>>> +    switch (avctx->profile) {
>>> +    case FF_PROFILE_HEVC_MAIN:
>>> +        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
>>> +        break;
>>> +    default:
>>> +        break;
>>> +    }
>>> +    if (profile == 0) {
>>> +        profile = ctx->profile;
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
>>> +
>>> +    profile_level = avctx->level;
>>> +    if (profile_level == 0) {
>>> +        profile_level = ctx->level;
>>> +    }
>>> +    if (profile_level != 0) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
>>> +    // Maximum Reference Frames
>>> +    if (avctx->refs != 0) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
>>> +    }
>>> +    // Aspect Ratio
>>> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
>>> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
>>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
>>> +    }
>>> +
>>> +    // Picture control properties
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
>>> +    if (avctx->slices > 1) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);
>>
>> What about SAO?
> 
> SAO ???

You're looking at AV_CODEC_FLAG_LOOP_FILTER to disable this, so you might want to consider both loop filters in H.265, not just the deblocking filter.

- Mark

