51 fprintf(stderr,
"Failed to create VAAPI frame context.\n");
61 fprintf(stderr,
"Failed to initialize VAAPI frame context." 84 fprintf(stderr,
"Error code: %s\n",
av_err2str(ret));
93 ret = fwrite(enc_pkt.
data, enc_pkt.
size, 1, fout);
98 ret = ((ret ==
AVERROR(EAGAIN)) ? 0 : -1);
102 int main(
int argc,
char *argv[])
109 const char *
enc_name =
"h264_vaapi";
112 fprintf(stderr,
"Usage: %s <width> <height> <input file> <output file>\n", argv[0]);
116 width = atoi(argv[1]);
120 if (!(fin = fopen(argv[3],
"r"))) {
121 fprintf(stderr,
"Fail to open input file : %s\n", strerror(errno));
124 if (!(fout = fopen(argv[4],
"w+b"))) {
125 fprintf(stderr,
"Fail to open output file : %s\n", strerror(errno));
133 fprintf(stderr,
"Failed to create a VAAPI device. Error code: %s\n",
av_err2str(err));
138 fprintf(stderr,
"Could not find encoder.\n");
157 fprintf(stderr,
"Failed to set hwframe context.\n");
162 fprintf(stderr,
"Cannot open video encoder codec. Error code: %s\n",
av_err2str(err));
177 if ((err = fread((
uint8_t*)(sw_frame->
data[0]), size, 1, fin)) <= 0)
179 if ((err = fread((
uint8_t*)(sw_frame->
data[1]), size/2, 1, fin)) <= 0)
187 fprintf(stderr,
"Error code: %s.\n",
av_err2str(err));
190 if (!hw_frame->hw_frames_ctx) {
195 fprintf(stderr,
"Error while transferring frame data to surface." 200 if ((err = (
encode_write(avctx, hw_frame, fout))) < 0) {
201 fprintf(stderr,
"Failed to encode.\n");
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
This structure describes decoded (raw) audio or video data.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
int width
The allocated dimensions of the frames in this pool.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold int end(AVCodecContext *avctx)
int main(int argc, char *argv[])
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
#define AVERROR_EOF
End of file.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
static AVBufferRef * hw_device_ctx
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
int initial_pool_size
Initial size of the frame pool.
AVCodec * avcodec_find_encoder_by_name(const char *name)
Find a registered encoder with the specified name.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int width
picture width / height.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Libavcodec external API header.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
main external API structure.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
uint8_t * data
The data buffer.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Rational number (pair of numerator and denominator).
This struct describes a set or pool of "hardware" frames (i.e.
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
A reference to a data buffer.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
This structure stores compressed data.