This example shows how to do VAAPI-accelerated transcoding. Usage: vaapi_transcode input_stream codec output_stream, e.g.: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
- vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
#include <stdio.h>
#include <errno.h>
{
return *p;
}
fprintf(stderr, "Unable to decode this file using VA-API.\n");
}
{
fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
}
fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
}
fprintf(stderr, "Cannot find a video stream in the input file. "
}
fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
}
fprintf(stderr, "A hardware device reference create failed.\n");
}
fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
}
{
fprintf(stderr,
"Error during encoding. Error code: %s\n",
av_err2str(
ret));
}
while (1) {
break;
fprintf(stderr, "Error during writing data to output file. "
return -1;
}
}
return 0;
}
{
fprintf(stderr,
"Error during decoding. Error code: %s\n",
av_err2str(
ret));
}
return 0;
fprintf(stderr,
"Error while decoding. Error code: %s\n",
av_err2str(
ret));
}
}
fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
}
fprintf(stderr, "Failed to allocate stream for output format.\n");
}
fprintf(stderr, "Failed to copy the stream parameters. "
}
fprintf(stderr, "Error while writing stream header. "
}
}
fprintf(stderr, "Error during encoding and writing.\n");
}
return 0;
}
int main(
int argc,
char **argv)
{
if (argc != 4) {
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
"The output format is guessed according to the file extension.\n"
"\n", argv[0]);
return -1;
}
fprintf(stderr,
"Failed to create a VAAPI device. Error code: %s\n",
av_err2str(
ret));
return -1;
}
fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
}
fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
}
}
fprintf(stderr, "Cannot open output file. "
}
break;
}
}
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
AVPixelFormat
Pixel format.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Filters: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video, that means pixel format; for audio, that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
static AVFormatContext * ofmt_ctx
#define AVERROR_EOF
End of file.
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_cold int end(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
static AVFormatContext * ifmt_ctx
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
static const chunk_decoder decoder[8]
static int encode_write(AVFrame *frame)
static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
AVCodec * avcodec_find_encoder_by_name(const char *name)
Find a registered encoder with the specified name.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
static enum AVPixelFormat pix_fmts[]
#define AVIO_FLAG_WRITE
write-only
static AVCodecContext * decoder_ctx
static int open_input_file(const char *filename)
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
AVIOContext * pb
I/O context.
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int main(int argc, char **argv)
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
static AVCodecContext * encoder_ctx
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
static AVBufferRef * hw_device_ctx
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; therefore, any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed. It must return zero, or at least make progress towards producing a frame.
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
main external API structure.
A Quick Description Of Rate Distortion Theory We want to encode a video
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
A reference to a data buffer.
This structure stores compressed data.
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
int width
picture width / height.
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.