This example shows how to do VAAPI-accelerated transcoding.

Usage: vaapi_transcode input_stream codec output_stream
e.g.: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
      - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
#include <stdio.h>
#include <errno.h>

#include <libavutil/hwcontext.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
static AVBufferRef *hw_device_ctx = NULL;
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
static int video_stream = -1;
static AVStream *ost;
static int initialized = 0;

/* Pick the VA-API pixel format from the list offered by the decoder. */
static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
                                           const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    }
    fprintf(stderr, "Unable to decode this file using VA-API.\n");
    return AV_PIX_FMT_NONE;
}

/* Open the input file, find its video stream and set up a VA-API decoder for it. */
static int open_input_file(const char *filename)
{
    const AVCodec *decoder = NULL;
    AVStream *video;
    int ret;

    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
                filename, av_err2str(ret));
        return ret;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }
    if ((ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0)) < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file. "
                "Error code: %s\n", av_err2str(ret));
        return ret;
    }
    video_stream = ret;

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = ifmt_ctx->streams[video_stream];
    if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
        fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }

    decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    if (!decoder_ctx->hw_device_ctx) {
        fprintf(stderr, "A hardware device reference create failed.\n");
        return AVERROR(ENOMEM);
    }
    decoder_ctx->get_format = get_vaapi_format;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
        fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
                av_err2str(ret));

    return ret;
}

/* Send one frame (or NULL to flush) to the encoder and write every packet it produces. */
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
{
    int ret = 0;

    av_packet_unref(enc_pkt);

    if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
        fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
        goto end;
    }
    while (1) {
        if ((ret = avcodec_receive_packet(encoder_ctx, enc_pkt)))
            break;
        enc_pkt->stream_index = 0;
        av_packet_rescale_ts(enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
                             ofmt_ctx->streams[0]->time_base);
        if ((ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt)) < 0) {
            fprintf(stderr, "Error during writing data to output file. "
                    "Error code: %s\n", av_err2str(ret));
            return -1;
        }
    }

end:
    if (ret == AVERROR_EOF)
        return 0;
    return ret == AVERROR(EAGAIN) ? 0 : -1;
}

/* Decode one packet and feed every decoded frame to the encoder. The encoder and the
 * output stream are initialized lazily, once the first frame has been decoded. */
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
{
    AVFrame *frame;
    int ret = 0;

    if ((ret = avcodec_send_packet(decoder_ctx, pkt)) < 0) {
        fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
        return ret;
    }

    while (ret >= 0) {
        if (!(frame = av_frame_alloc()))
            return AVERROR(ENOMEM);

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
            goto fail;
        }

        if (!initialized) {
            /* The encoder reuses the decoder's hardware frames context, which only
             * becomes available after the first frame has been decoded. */
            encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
            if (!encoder_ctx->hw_frames_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            /* Keep the encoder parameters the same as the decoder's. */
            encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
            encoder_ctx->pix_fmt   = AV_PIX_FMT_VAAPI;
            encoder_ctx->width     = decoder_ctx->width;
            encoder_ctx->height    = decoder_ctx->height;

            if ((ret = avcodec_open2(encoder_ctx, enc_codec, NULL)) < 0) {
                fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
                        av_err2str(ret));
                goto fail;
            }
            if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
                fprintf(stderr, "Failed to allocate stream for output format.\n");
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ost->time_base = encoder_ctx->time_base;
            if ((ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx)) < 0) {
                fprintf(stderr, "Failed to copy the stream parameters. "
                        "Error code: %s\n", av_err2str(ret));
                goto fail;
            }
            if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
                fprintf(stderr, "Error while writing stream header. "
                        "Error code: %s\n", av_err2str(ret));
                goto fail;
            }
            initialized = 1;
        }

        if ((ret = encode_write(pkt, frame)) < 0)
            fprintf(stderr, "Error during encoding and writing.\n");

fail:
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}
int main(int argc, char **argv)
{
    const AVCodec *enc_codec;
    AVPacket *dec_pkt = NULL;
    int ret = 0;

if (argc != 4) {
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
"The output format is guessed according to the file extension.\n"
"\n", argv[0]);
return -1;
}
    if ((ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create a VAAPI device. Error code: %s\n", av_err2str(ret));
        return -1;
    }

    dec_pkt = av_packet_alloc();
    if (!dec_pkt) {
        fprintf(stderr, "Failed to allocate decode packet\n");
        goto end;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;

    if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
        fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
        ret = -1;
        goto end;
    }

    if ((ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3])) < 0) {
        fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
                "%s\n", av_err2str(ret));
        goto end;
    }

    if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE)) < 0) {
        fprintf(stderr, "Cannot open output file. "
                "Error code: %s\n", av_err2str(ret));
        goto end;
    }

    /* Read all packets, transcoding only the selected video stream. */
    while (ret >= 0) {
        if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
            break;
        if (video_stream == dec_pkt->stream_index)
            ret = dec_enc(dec_pkt, enc_codec);
        av_packet_unref(dec_pkt);
    }

    /* Flush the decoder, then the encoder, and finish the output file. */
    av_packet_unref(dec_pkt);
    dec_enc(dec_pkt, enc_codec);
    encode_write(dec_pkt, NULL);
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx);
    avformat_close_input(&ofmt_ctx);
    avcodec_free_context(&decoder_ctx);
    avcodec_free_context(&encoder_ctx);
    av_buffer_unref(&hw_device_ctx);
    av_packet_free(&dec_pkt);
    return ret;
}
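The example always opens the default VA-API device. For reference, av_hwdevice_ctx_create() also accepts an explicit device string, so a variant that targets a specific DRM render node could look like the sketch below; the path "/dev/dri/renderD128" and the helper name are assumptions about the local setup, not part of the example itself.

#include <libavutil/hwcontext.h>

/* Sketch only: open a specific DRM render node instead of the default VA-API device.
 * Adjust the device path to match the local system. */
static int open_vaapi_device(AVBufferRef **device_ref)
{
    return av_hwdevice_ctx_create(device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                  "/dev/dri/renderD128", NULL, 0);
}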
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
AVPixelFormat
Pixel format.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
static AVFormatContext * ofmt_ctx
#define AVERROR_EOF
End of file.
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
void av_packet_free(AVPacket **pkt)
Free the packet; if the packet is reference counted, it will be unreferenced first.
static AVFormatContext * ifmt_ctx
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
#define AVIO_FLAG_WRITE
write-only
static AVCodecContext * decoder_ctx
static int open_input_file(const char *filename)
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
AVIOContext * pb
I/O context.
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int main(int argc, char **argv)
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
static AVCodecContext * encoder_ctx
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
static AVBufferRef * hw_device_ctx
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
AVCodecContext
Main external API structure.
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
AVBufferRef
A reference to a data buffer.
AVPacket
This structure stores compressed data.
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
int width
picture width / height.
const AVCodec * avcodec_find_encoder_by_name(const char *name)
Find a registered encoder with the specified name.
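The send/receive calls listed above follow one common pattern, which the example uses on both the decode and encode side. A minimal sketch of the decode side is given below; error handling is trimmed and the function and variable names are hypothetical, assuming the codec context, packet and frame have been set up as in the example.

#include <libavcodec/avcodec.h>

/* Sketch of the avcodec send/receive decode loop: push one packet, then drain
 * all frames the decoder has ready. Passing pkt == NULL enters flush mode. */
static int decode_all(AVCodecContext *dec, AVPacket *pkt, AVFrame *frm)
{
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(dec, frm)) >= 0) {
        /* ... consume frm here, e.g. hand it to an encoder ... */
        av_frame_unref(frm);
    }
    /* EAGAIN (more input needed) and EOF (fully flushed) are not errors. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}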