[FFmpeg-devel] [PATCH v2 25/36] vaapi_encode_mjpeg: Use CBS to store parameters and write headers
Xiang, Haihao
haihao.xiang at intel.com
Fri Jun 15 04:37:18 EEST 2018
On Fri, 2018-06-08 at 00:43 +0100, Mark Thompson wrote:
> Also adds greyscale, 4:2:2, 4:4:4 and RGB support.
> ---
> configure | 2 +-
> doc/encoders.texi | 17 +-
> libavcodec/vaapi_encode_mjpeg.c | 529 +++++++++++++++++++++++++---------------
> 3 files changed, 347 insertions(+), 201 deletions(-)
>
> diff --git a/configure b/configure
> index d908283954..cde32a8fad 100755
> --- a/configure
> +++ b/configure
> @@ -2939,7 +2939,7 @@ mjpeg_cuvid_decoder_deps="cuvid"
> mjpeg_qsv_encoder_deps="libmfx"
> mjpeg_qsv_encoder_select="qsvenc"
> mjpeg_vaapi_encoder_deps="VAEncPictureParameterBufferJPEG"
> -mjpeg_vaapi_encoder_select="vaapi_encode jpegtables"
> +mjpeg_vaapi_encoder_select="cbs_jpeg jpegtables vaapi_encode"
> mpeg1_cuvid_decoder_deps="cuvid"
> mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
> mpeg2_crystalhd_decoder_select="crystalhd"
> diff --git a/doc/encoders.texi b/doc/encoders.texi
> index b451142cfb..ceddfdda64 100644
> --- a/doc/encoders.texi
> +++ b/doc/encoders.texi
> @@ -2662,8 +2662,21 @@ Include access unit delimiters in the stream (not included by default).
> @end table
>
> @item mjpeg_vaapi
> -Always encodes using the standard quantisation and huffman tables -
> -@option{global_quality} scales the standard quantisation table (range 1-100).
> +Only baseline DCT encoding is supported. The encoder always uses the standard
> +quantisation and huffman tables - @option{global_quality} scales the standard
> +quantisation table (range 1-100).
> +
> +For YUV, 4:2:0, 4:2:2 and 4:4:4 subsampling modes are supported. RGB is also
> +supported, and will create an RGB JPEG.
> +
> +@table @option
> +@item jfif
> +Include JFIF header in each frame (not included by default).
> +@item huffman
> +Include standard huffman tables (on by default). Turning this off will save
> +a few hundred bytes in each output frame, but may lose compatibility with some
> +JPEG decoders which don't fully handle MJPEG.
> +@end table
>
> @item mpeg2_vaapi
> @option{profile} and @option{level} set the value of @emph{profile_and_level_indication}.
> diff --git a/libavcodec/vaapi_encode_mjpeg.c b/libavcodec/vaapi_encode_mjpeg.c
> index f76645425a..2f79070e58 100644
> --- a/libavcodec/vaapi_encode_mjpeg.c
> +++ b/libavcodec/vaapi_encode_mjpeg.c
> @@ -23,9 +23,12 @@
> #include "libavutil/common.h"
> #include "libavutil/internal.h"
> #include "libavutil/opt.h"
> -#include "libavutil/pixfmt.h"
> +#include "libavutil/pixdesc.h"
>
> #include "avcodec.h"
> +#include "bytestream.h"
> +#include "cbs.h"
> +#include "cbs_jpeg.h"
> #include "internal.h"
> #include "jpegtables.h"
> #include "mjpeg.h"
> @@ -58,253 +61,346 @@ static const unsigned char vaapi_encode_mjpeg_quant_chrominance[64] = {
> typedef struct VAAPIEncodeMJPEGContext {
> VAAPIEncodeContext common;
>
> + // User options.
> + int jfif;
> + int huffman;
> +
> + // Derived settings.
> int quality;
> - int component_subsample_h[3];
> - int component_subsample_v[3];
> + uint8_t jfif_data[14];
> +
> + // Writer structures.
> + JPEGRawFrameHeader frame_header;
> + JPEGRawScan scan;
> + JPEGRawApplicationData jfif_header;
> + JPEGRawQuantisationTableSpecification quant_tables;
> + JPEGRawHuffmanTableSpecification huffman_tables;
>
> - VAQMatrixBufferJPEG quant_tables;
> - VAHuffmanTableBufferJPEGBaseline huffman_tables;
> + CodedBitstreamContext *cbc;
> + CodedBitstreamFragment current_fragment;
> } VAAPIEncodeMJPEGContext;
>
> -static av_cold void vaapi_encode_mjpeg_copy_huffman(unsigned char *dst_lengths,
> -                                                    unsigned char *dst_values,
> -                                                    const unsigned char *src_lengths,
> -                                                    const unsigned char *src_values)
> +static int vaapi_encode_mjpeg_write_image_header(AVCodecContext *avctx,
> + VAAPIEncodePicture *pic,
> + VAAPIEncodeSlice *slice,
> +                                                 char *data, size_t *data_len)
> {
> - int i, mt;
> -
> - ++src_lengths;
> + VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + CodedBitstreamFragment *frag = &priv->current_fragment;
> + int err;
> +
> + if (priv->jfif) {
> + err = ff_cbs_insert_unit_content(priv->cbc, frag, -1,
> + JPEG_MARKER_APPN + 0,
> + &priv->jfif_header, NULL);
> + if (err < 0)
> + goto fail;
> + }
>
> - mt = 0;
> - for (i = 0; i < 16; i++)
> - mt += (dst_lengths[i] = src_lengths[i]);
> + err = ff_cbs_insert_unit_content(priv->cbc, frag, -1,
> + JPEG_MARKER_DQT,
> + &priv->quant_tables, NULL);
> + if (err < 0)
> + goto fail;
> +
> + err = ff_cbs_insert_unit_content(priv->cbc, frag, -1,
> + JPEG_MARKER_SOF0,
> + &priv->frame_header, NULL);
> + if (err < 0)
> + goto fail;
> +
> + if (priv->huffman) {
> + err = ff_cbs_insert_unit_content(priv->cbc, frag, -1,
> + JPEG_MARKER_DHT,
> + &priv->huffman_tables, NULL);
> + if (err < 0)
> + goto fail;
> + }
>
> - for (i = 0; i < mt; i++)
> - dst_values[i] = src_values[i];
> -}
> + err = ff_cbs_insert_unit_content(priv->cbc, frag, -1,
> + JPEG_MARKER_SOS,
> + &priv->scan, NULL);
> + if (err < 0)
> + goto fail;
>
> -static av_cold void vaapi_encode_mjpeg_init_tables(AVCodecContext *avctx)
> -{
> - VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> - VAQMatrixBufferJPEG *quant = &priv->quant_tables;
> - VAHuffmanTableBufferJPEGBaseline *huff = &priv->huffman_tables;
> - int i;
> -
> - quant->load_lum_quantiser_matrix = 1;
> - quant->load_chroma_quantiser_matrix = 1;
> + err = ff_cbs_write_fragment_data(priv->cbc, frag);
> + if (err < 0) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to write image header.\n");
> + return err;
Should it be 'goto fail'? Some new units have been inserted into the fragment, so
ff_cbs_fragment_uninit() should be called to release the resources.
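i.e. something along these lines (untested sketch of the suggested change,
keeping the existing message):

    err = ff_cbs_write_fragment_data(priv->cbc, frag);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to write image header.\n");
        // jump to the existing fail: label so ff_cbs_fragment_uninit() runs
        goto fail;
    }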
> + }
>
> - for (i = 0; i < 64; i++) {
> - quant->lum_quantiser_matrix[i] =
> - vaapi_encode_mjpeg_quant_luminance[i];
> - quant->chroma_quantiser_matrix[i] =
> - vaapi_encode_mjpeg_quant_chrominance[i];
> + if (*data_len < 8 * frag->data_size) {
> + av_log(avctx, AV_LOG_ERROR, "Image header too large: "
> + "%zu < %zu.\n", *data_len, 8 * frag->data_size);
> + err = AVERROR(ENOSPC);
> + goto fail;
Could you change the last parameter name of this function to bit_len or
bit_data_len? I think that would be more readable, and users wouldn't need to
wonder why frag->data_size is multiplied by 8.
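For example (untested, just to illustrate the suggested rename against the
quoted code; the callers stay the same):

    static int vaapi_encode_mjpeg_write_image_header(AVCodecContext *avctx,
                                                     VAAPIEncodePicture *pic,
                                                     VAAPIEncodeSlice *slice,
                                                     char *data, size_t *bit_len)

    ...

    // Now it is obvious that the size check and the returned length are in bits.
    if (*bit_len < 8 * frag->data_size) {
        av_log(avctx, AV_LOG_ERROR, "Image header too large: "
               "%zu < %zu.\n", *bit_len, 8 * frag->data_size);
        err = AVERROR(ENOSPC);
        goto fail;
    }
    ...
    *bit_len = 8 * (frag->data_size - 2);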
> }
>
> - huff->load_huffman_table[0] = 1;
> - vaapi_encode_mjpeg_copy_huffman(huff->huffman_table[0].num_dc_codes,
> - huff->huffman_table[0].dc_values,
> - avpriv_mjpeg_bits_dc_luminance,
> - avpriv_mjpeg_val_dc);
> - vaapi_encode_mjpeg_copy_huffman(huff->huffman_table[0].num_ac_codes,
> - huff->huffman_table[0].ac_values,
> - avpriv_mjpeg_bits_ac_luminance,
> - avpriv_mjpeg_val_ac_luminance);
> -    memset(huff->huffman_table[0].pad, 0, sizeof(huff->huffman_table[0].pad));
> -
> - huff->load_huffman_table[1] = 1;
> - vaapi_encode_mjpeg_copy_huffman(huff->huffman_table[1].num_dc_codes,
> - huff->huffman_table[1].dc_values,
> - avpriv_mjpeg_bits_dc_chrominance,
> - avpriv_mjpeg_val_dc);
> - vaapi_encode_mjpeg_copy_huffman(huff->huffman_table[1].num_ac_codes,
> - huff->huffman_table[1].ac_values,
> - avpriv_mjpeg_bits_ac_chrominance,
> - avpriv_mjpeg_val_ac_chrominance);
> -    memset(huff->huffman_table[1].pad, 0, sizeof(huff->huffman_table[1].pad));
> -}
> + // Remove the EOI at the end of the fragment.
> + memcpy(data, frag->data, frag->data_size - 2);
> + *data_len = 8 * (frag->data_size - 2);
>
> -static void vaapi_encode_mjpeg_write_marker(PutBitContext *pbc, int marker)
> -{
> - put_bits(pbc, 8, 0xff);
> - put_bits(pbc, 8, marker);
> + err = 0;
> +fail:
> + ff_cbs_fragment_uninit(priv->cbc, frag);
> + return err;
> }
>
> -static int vaapi_encode_mjpeg_write_image_header(AVCodecContext *avctx,
> +static int vaapi_encode_mjpeg_write_extra_buffer(AVCodecContext *avctx,
> VAAPIEncodePicture *pic,
> - VAAPIEncodeSlice *slice,
> + int index, int *type,
>                                                  char *data, size_t *data_len)
> {
> - VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> - VAEncPictureParameterBufferJPEG *vpic = pic->codec_picture_params;
> - VAEncSliceParameterBufferJPEG *vslice = slice->codec_slice_params;
> - PutBitContext pbc;
> - int t, i, quant_scale;
> + VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + int t, i, k;
>
> - init_put_bits(&pbc, data, *data_len);
> + if (index == 0) {
> + // Write quantisation tables.
> + JPEGRawFrameHeader *fh = &priv->frame_header;
> + JPEGRawQuantisationTableSpecification *dqt = &priv->quant_tables;
> + VAQMatrixBufferJPEG *quant;
> +
> + if (*data_len < sizeof(*quant))
> + return AVERROR(ENOSPC);
> + *type = VAQMatrixBufferType;
> + *data_len = sizeof(*quant);
> +
> + quant = (VAQMatrixBufferJPEG*)data;
> + memset(quant, 0, sizeof(*quant));
> +
> + quant->load_lum_quantiser_matrix = 1;
> + for (i = 0; i < 64; i++)
> + quant->lum_quantiser_matrix[i] = dqt->table[fh->Tq[0]].Q[i];
> +
> + if (fh->Nf > 1) {
> + quant->load_chroma_quantiser_matrix = 1;
> + for (i = 0; i < 64; i++)
> + quant->chroma_quantiser_matrix[i] =
> + dqt->table[fh->Tq[1]].Q[i];
> + }
>
> - vaapi_encode_mjpeg_write_marker(&pbc, SOI);
> + } else if (index == 1) {
> + // Write huffman tables.
> + JPEGRawScanHeader *sh = &priv->scan.header;
> + JPEGRawHuffmanTableSpecification *dht = &priv->huffman_tables;
> + VAHuffmanTableBufferJPEGBaseline *huff;
> +
> + if (*data_len < sizeof(*huff))
> + return AVERROR(ENOSPC);
> + *type = VAHuffmanTableBufferType;
> + *data_len = sizeof(*huff);
> +
> + huff = (VAHuffmanTableBufferJPEGBaseline*)data;
> + memset(huff, 0, sizeof(*huff));
> +
> + for (t = 0; t < 1 + (sh->Ns > 1); t++) {
> + const JPEGRawHuffmanTable *ht;
> +
> + huff->load_huffman_table[t] = 1;
> +
> + ht = &dht->table[2 * t];
> + for (i = k = 0; i < 16; i++)
> + k += (huff->huffman_table[t].num_dc_codes[i] = ht->L[i]);
> + av_assert0(k <= sizeof(huff->huffman_table[t].dc_values));
> + for (i = 0; i < k; i++)
> + huff->huffman_table[t].dc_values[i] = ht->V[i];
> +
> + ht = &dht->table[2 * t + 1];
> + for (i = k = 0; i < 16; i++)
> + k += (huff->huffman_table[t].num_ac_codes[i] = ht->L[i]);
> + av_assert0(k <= sizeof(huff->huffman_table[t].ac_values));
> + for (i = 0; i < k; i++)
> + huff->huffman_table[t].ac_values[i] = ht->V[i];
> + }
>
> - // Quantisation table coefficients are scaled for quality by the driver,
> - // so we also need to do it ourselves here so that headers match.
> - if (priv->quality < 50)
> - quant_scale = 5000 / priv->quality;
> + } else {
> + return AVERROR_EOF;
> + }
> + return 0;
> +}
> +
> +static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
> + VAAPIEncodePicture *pic)
> +{
> + VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + JPEGRawFrameHeader *fh = &priv->frame_header;
> + JPEGRawScanHeader *sh = &priv->scan.header;
> + VAEncPictureParameterBufferJPEG *vpic = pic->codec_picture_params;
> + const AVPixFmtDescriptor *desc;
> + const uint8_t *components;
> + int t, i, quant_scale, len;
> +
> + desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
> + av_assert0(desc);
> + if (desc->flags & AV_PIX_FMT_FLAG_RGB)
> + components = (uint8_t[3]) { 'R', 'G', 'B' };
> else
> - quant_scale = 200 - 2 * priv->quality;
> + components = (uint8_t[3]) { 1, 2, 3 };
>
> - for (t = 0; t < 2; t++) {
> - int q;
> + // Frame header.
>
> - vaapi_encode_mjpeg_write_marker(&pbc, DQT);
> + fh->P = 8;
> + fh->Y = avctx->height;
> + fh->X = avctx->width;
> + fh->Nf = desc->nb_components;
>
> - put_bits(&pbc, 16, 3 + 64); // Lq
> - put_bits(&pbc, 4, 0); // Pq
> - put_bits(&pbc, 4, t); // Tq
> + for (i = 0; i < fh->Nf; i++) {
> + fh->C[i] = components[i];
> + fh->H[i] = 1 + (i == 0 ? desc->log2_chroma_w : 0);
> + fh->V[i] = 1 + (i == 0 ? desc->log2_chroma_h : 0);
>
> - for (i = 0; i < 64; i++) {
> - q = i[t ? priv->quant_tables.chroma_quantiser_matrix
> - : priv->quant_tables.lum_quantiser_matrix];
> - q = (q * quant_scale) / 100;
> - if (q < 1) q = 1;
> - if (q > 255) q = 255;
> - put_bits(&pbc, 8, q);
> - }
> + fh->Tq[i] = !!i;
> }
>
> - vaapi_encode_mjpeg_write_marker(&pbc, SOF0);
> + fh->Lf = 8 + 3 * fh->Nf;
>
> - put_bits(&pbc, 16, 8 + 3 * vpic->num_components); // Lf
> - put_bits(&pbc, 8, vpic->sample_bit_depth); // P
> - put_bits(&pbc, 16, vpic->picture_height); // Y
> - put_bits(&pbc, 16, vpic->picture_width); // X
> - put_bits(&pbc, 8, vpic->num_components); // Nf
> + // JFIF header.
> + if (priv->jfif) {
> + JPEGRawApplicationData *app = &priv->jfif_header;
> + AVRational sar = pic->input_image->sample_aspect_ratio;
> + int sar_w, sar_h;
> + PutByteContext pbc;
>
> - for (i = 0; i < vpic->num_components; i++) {
> - put_bits(&pbc, 8, vpic->component_id[i]); // Ci
> - put_bits(&pbc, 4, priv->component_subsample_h[i]); // Hi
> - put_bits(&pbc, 4, priv->component_subsample_v[i]); // Vi
> - put_bits(&pbc, 8, vpic->quantiser_table_selector[i]); // Tqi
> - }
> -
> - for (t = 0; t < 4; t++) {
> - int mt;
> - unsigned char *lengths, *values;
> + bytestream2_init_writer(&pbc, priv->jfif_data,
> + sizeof(priv->jfif_data));
>
> - vaapi_encode_mjpeg_write_marker(&pbc, DHT);
> + bytestream2_put_buffer(&pbc, "JFIF", 5);
> + bytestream2_put_be16(&pbc, 0x0102);
> + bytestream2_put_byte(&pbc, 0);
>
> - if ((t & 1) == 0) {
> - lengths = priv->huffman_tables.huffman_table[t / 2].num_dc_codes;
> - values = priv->huffman_tables.huffman_table[t / 2].dc_values;
> + av_reduce(&sar_w, &sar_h, sar.num, sar.den, 65535);
> + if (sar_w && sar_h) {
> + bytestream2_put_be16(&pbc, sar_w);
> + bytestream2_put_be16(&pbc, sar_h);
> } else {
> - lengths = priv->huffman_tables.huffman_table[t / 2].num_ac_codes;
> - values = priv->huffman_tables.huffman_table[t / 2].ac_values;
> + bytestream2_put_be16(&pbc, 1);
> + bytestream2_put_be16(&pbc, 1);
> }
>
> - mt = 0;
> - for (i = 0; i < 16; i++)
> - mt += lengths[i];
> + bytestream2_put_byte(&pbc, 0);
> + bytestream2_put_byte(&pbc, 0);
>
> - put_bits(&pbc, 16, 2 + 17 + mt); // Lh
> - put_bits(&pbc, 4, t & 1); // Tc
> - put_bits(&pbc, 4, t / 2); // Th
> + av_assert0(bytestream2_get_bytes_left_p(&pbc) == 0);
>
> - for (i = 0; i < 16; i++)
> - put_bits(&pbc, 8, lengths[i]);
> - for (i = 0; i < mt; i++)
> - put_bits(&pbc, 8, values[i]);
> + app->Lp = 2 + sizeof(priv->jfif_data);
> + app->Ap = priv->jfif_data;
> + app->Ap_ref = NULL;
> }
>
> - vaapi_encode_mjpeg_write_marker(&pbc, SOS);
> + // Quantisation tables.
>
> - av_assert0(vpic->num_components == vslice->num_components);
> + if (priv->quality < 50)
> + quant_scale = 5000 / priv->quality;
> + else
> + quant_scale = 200 - 2 * priv->quality;
>
> - put_bits(&pbc, 16, 6 + 2 * vslice->num_components); // Ls
> - put_bits(&pbc, 8, vslice->num_components); // Ns
> + len = 2;
>
> - for (i = 0; i < vslice->num_components; i++) {
> - put_bits(&pbc, 8, vslice->components[i].component_selector); // Csj
> - put_bits(&pbc, 4, vslice->components[i].dc_table_selector); // Tdj
> - put_bits(&pbc, 4, vslice->components[i].ac_table_selector); // Taj
> - }
> + for (t = 0; t < 1 + (fh->Nf > 1); t++) {
> + JPEGRawQuantisationTable *quant = &priv->quant_tables.table[t];
> + const uint8_t *data = t == 0 ?
> + vaapi_encode_mjpeg_quant_luminance :
> + vaapi_encode_mjpeg_quant_chrominance;
>
> - put_bits(&pbc, 8, 0); // Ss
> - put_bits(&pbc, 8, 63); // Se
> - put_bits(&pbc, 4, 0); // Ah
> - put_bits(&pbc, 4, 0); // Al
> + quant->Pq = 0;
> + quant->Tq = t;
> + for (i = 0; i < 64; i++)
> + quant->Q[i] = av_clip(data[i] * quant_scale / 100, 1, 255);
>
> - *data_len = put_bits_count(&pbc);
> - flush_put_bits(&pbc);
> + len += 65;
> + }
>
> - return 0;
> -}
> + priv->quant_tables.Lq = len;
> +
> + // Huffman tables.
> +
> + len = 2;
> +
> + for (t = 0; t < 2 + 2 * (fh->Nf > 1); t++) {
> + JPEGRawHuffmanTable *huff = &priv->huffman_tables.table[t];
> + const uint8_t *lengths, *values;
> + int k;
> +
> + switch (t) {
> + case 0:
> + lengths = avpriv_mjpeg_bits_dc_luminance + 1;
> + values = avpriv_mjpeg_val_dc;
> + break;
> + case 1:
> + lengths = avpriv_mjpeg_bits_ac_luminance + 1;
> + values = avpriv_mjpeg_val_ac_luminance;
> + break;
> + case 2:
> + lengths = avpriv_mjpeg_bits_dc_chrominance + 1;
> + values = avpriv_mjpeg_val_dc;
> + break;
> + case 3:
> + lengths = avpriv_mjpeg_bits_ac_chrominance + 1;
> + values = avpriv_mjpeg_val_ac_chrominance;
> + break;
> + }
>
> -static int vaapi_encode_mjpeg_write_extra_buffer(AVCodecContext *avctx,
> - VAAPIEncodePicture *pic,
> - int index, int *type,
> -                                                 char *data, size_t *data_len)
> -{
> - VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + huff->Tc = t % 2;
> + huff->Th = t / 2;
>
> - if (index == 0) {
> - // Write quantisation tables.
> - if (*data_len < sizeof(priv->quant_tables))
> - return AVERROR(EINVAL);
> - *type = VAQMatrixBufferType;
> - memcpy(data, &priv->quant_tables,
> - *data_len = sizeof(priv->quant_tables));
> + for (i = k = 0; i < 16; i++)
> + k += (huff->L[i] = lengths[i]);
>
> - } else if (index == 1) {
> - // Write huffman tables.
> - if (*data_len < sizeof(priv->huffman_tables))
> - return AVERROR(EINVAL);
> - *type = VAHuffmanTableBufferType;
> - memcpy(data, &priv->huffman_tables,
> - *data_len = sizeof(priv->huffman_tables));
> + for (i = 0; i < k; i++)
> + huff->V[i] = values[i];
>
> - } else {
> - return AVERROR_EOF;
> + len += 17 + k;
> }
> - return 0;
> -}
>
> -static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
> - VAAPIEncodePicture *pic)
> -{
> - VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> - VAEncPictureParameterBufferJPEG *vpic = pic->codec_picture_params;
> + priv->huffman_tables.Lh = len;
> +
> + // Scan header.
> +
> + sh->Ns = fh->Nf;
> +
> + for (i = 0; i < fh->Nf; i++) {
> + sh->Cs[i] = fh->C[i];
> + sh->Td[i] = i > 0;
> + sh->Ta[i] = i > 0;
> + }
>
> - vpic->reconstructed_picture = pic->recon_surface;
> - vpic->coded_buf = pic->output_buffer;
> + sh->Ss = 0;
> + sh->Se = 63;
> + sh->Ah = 0;
> + sh->Al = 0;
>
> - vpic->picture_width = avctx->width;
> - vpic->picture_height = avctx->height;
> + sh->Ls = 6 + 2 * sh->Ns;
>
> - vpic->pic_flags.bits.profile = 0;
> - vpic->pic_flags.bits.progressive = 0;
> - vpic->pic_flags.bits.huffman = 1;
> - vpic->pic_flags.bits.interleaved = 0;
> - vpic->pic_flags.bits.differential = 0;
>
> - vpic->sample_bit_depth = 8;
> - vpic->num_scan = 1;
> + *vpic = (VAEncPictureParameterBufferJPEG) {
> + .reconstructed_picture = pic->recon_surface,
> + .coded_buf = pic->output_buffer,
>
> - vpic->num_components = 3;
> + .picture_width = fh->X,
> + .picture_height = fh->Y,
>
> - vpic->component_id[0] = 1;
> - vpic->component_id[1] = 2;
> - vpic->component_id[2] = 3;
> + .pic_flags.bits = {
> + .profile = 0,
> + .progressive = 0,
> + .huffman = 1,
> + .interleaved = 0,
> + .differential = 0,
> + },
>
> - priv->component_subsample_h[0] = 2;
> - priv->component_subsample_v[0] = 2;
> - priv->component_subsample_h[1] = 1;
> - priv->component_subsample_v[1] = 1;
> - priv->component_subsample_h[2] = 1;
> - priv->component_subsample_v[2] = 1;
> + .sample_bit_depth = fh->P,
> + .num_scan = 1,
> + .num_components = fh->Nf,
>
> - vpic->quantiser_table_selector[0] = 0;
> - vpic->quantiser_table_selector[1] = 1;
> - vpic->quantiser_table_selector[2] = 1;
> + // The driver modifies the provided quantisation tables according
> + // to this quality value; the middle value of 50 makes that the
> + // identity so that they are used unchanged.
> + .quality = 50,
> + };
>
> - vpic->quality = priv->quality;
> + for (i = 0; i < fh->Nf; i++) {
> + vpic->component_id[i] = fh->C[i];
> + vpic->quantiser_table_selector[i] = fh->Tq[i];
> + }
>
> pic->nb_slices = 1;
>
> @@ -315,17 +411,20 @@ static int vaapi_encode_mjpeg_init_slice_params(AVCodecContext *avctx,
> VAAPIEncodePicture *pic,
> VAAPIEncodeSlice *slice)
> {
> - VAEncPictureParameterBufferJPEG *vpic = pic->codec_picture_params;
> + VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + JPEGRawScanHeader *sh = &priv->scan.header;
> VAEncSliceParameterBufferJPEG *vslice = slice->codec_slice_params;
> int i;
>
> - vslice->restart_interval = 0;
> + *vslice = (VAEncSliceParameterBufferJPEG) {
> + .restart_interval = 0,
> + .num_components = sh->Ns,
> + };
>
> - vslice->num_components = vpic->num_components;
> - for (i = 0; i < vslice->num_components; i++) {
> - vslice->components[i].component_selector = i + 1;
> - vslice->components[i].dc_table_selector = (i > 0);
> - vslice->components[i].ac_table_selector = (i > 0);
> + for (i = 0; i < sh->Ns; i++) {
> + vslice->components[i].component_selector = sh->Cs[i];
> + vslice->components[i].dc_table_selector = sh->Td[i];
> + vslice->components[i].ac_table_selector = sh->Ta[i];
> }
>
> return 0;
> @@ -335,6 +434,7 @@ static av_cold int vaapi_encode_mjpeg_configure(AVCodecContext *avctx)
> {
> VAAPIEncodeContext *ctx = avctx->priv_data;
> VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> + int err;
>
> priv->quality = avctx->global_quality;
> if (priv->quality < 1 || priv->quality > 100) {
> @@ -354,14 +454,22 @@ static av_cold int vaapi_encode_mjpeg_configure(AVCodecContext *avctx)
> ctx->va_packed_headers |= VA_ENC_PACKED_HEADER_SLICE;
> }
>
> - vaapi_encode_mjpeg_init_tables(avctx);
> + err = ff_cbs_init(&priv->cbc, AV_CODEC_ID_MJPEG, avctx);
> + if (err < 0)
> + return err;
>
> return 0;
> }
>
> static const VAAPIEncodeProfile vaapi_encode_mjpeg_profiles[] = {
> + { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT,
> + 8, 1, 0, 0, VAProfileJPEGBaseline },
> { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT,
> 8, 3, 1, 1, VAProfileJPEGBaseline },
> + { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT,
> + 8, 3, 1, 0, VAProfileJPEGBaseline },
> + { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT,
> + 8, 3, 0, 0, VAProfileJPEGBaseline },
> { FF_PROFILE_UNKNOWN }
> };
>
> @@ -398,6 +506,30 @@ static av_cold int vaapi_encode_mjpeg_init(AVCodecContext *avctx)
> return ff_vaapi_encode_init(avctx);
> }
>
> +static av_cold int vaapi_encode_mjpeg_close(AVCodecContext *avctx)
> +{
> + VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
> +
> + ff_cbs_close(&priv->cbc);
> +
> + return ff_vaapi_encode_close(avctx);
> +}
> +
> +#define OFFSET(x) offsetof(VAAPIEncodeMJPEGContext, x)
> +#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
> +static const AVOption vaapi_encode_mjpeg_options[] = {
> + VAAPI_ENCODE_COMMON_OPTIONS,
> +
> + { "jfif", "Include JFIF header",
> + OFFSET(jfif), AV_OPT_TYPE_BOOL,
> + { .i64 = 0 }, 0, 1, FLAGS },
> + { "huffman", "Include huffman tables",
> + OFFSET(huffman), AV_OPT_TYPE_BOOL,
> + { .i64 = 1 }, 0, 1, FLAGS },
> +
> + { NULL },
> +};
> +
> static const AVCodecDefault vaapi_encode_mjpeg_defaults[] = {
> { "global_quality", "80" },
> { "b", "0" },
> @@ -408,6 +540,7 @@ static const AVCodecDefault vaapi_encode_mjpeg_defaults[] = {
> static const AVClass vaapi_encode_mjpeg_class = {
> .class_name = "mjpeg_vaapi",
> .item_name = av_default_item_name,
> + .option = vaapi_encode_mjpeg_options,
> .version = LIBAVUTIL_VERSION_INT,
> };
>
> @@ -419,7 +552,7 @@ AVCodec ff_mjpeg_vaapi_encoder = {
> .priv_data_size = sizeof(VAAPIEncodeMJPEGContext),
> .init = &vaapi_encode_mjpeg_init,
> .encode2 = &ff_vaapi_encode2,
> - .close = &ff_vaapi_encode_close,
> + .close = &vaapi_encode_mjpeg_close,
> .priv_class = &vaapi_encode_mjpeg_class,
> .capabilities = AV_CODEC_CAP_HARDWARE,
> .defaults = vaapi_encode_mjpeg_defaults,