FFmpeg
hw_decode.c
/*
 * Copyright (c) 2017 Jun Zhao
 * Copyright (c) 2017 Kaixuan Liu
 *
 * HW Acceleration API (video decoding) decode sample
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file HW-accelerated decoding API usage example
 * @example hw_decode.c
 *
 * Perform HW-accelerated decoding with output frames from HW video
 * surfaces.
 */
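
/*
 * Example invocation (the device type must be one your FFmpeg build
 * supports; "vaapi", "cuda" and "videotoolbox" are common choices, and the
 * file names below are placeholders). The output file receives raw decoded
 * frames in whatever pixel format the hardware surfaces transfer to:
 *
 *   ./hw_decode vaapi input.mp4 output.raw
 */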

#include <stdio.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>

static AVBufferRef *hw_device_ctx = NULL;
static enum AVPixelFormat hw_pix_fmt;
static FILE *output_file = NULL;

static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
{
    int err = 0;

    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
                                      NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create specified HW device.\n");
        return err;
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    return err;
}

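/*
 * get_format callback: from the pixel formats offered by the decoder, pick
 * the hardware surface format discovered in main() (hw_pix_fmt). Returning
 * AV_PIX_FMT_NONE makes decoding fail instead of silently falling back to a
 * software format.
 */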
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != -1; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }

    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

static int decode_write(AVCodecContext *avctx, AVPacket *packet)
{
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVFrame *tmp_frame = NULL;
    uint8_t *buffer = NULL;
    int size;
    int ret = 0;

    ret = avcodec_send_packet(avctx, packet);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (1) {
        if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
            fprintf(stderr, "Can not alloc frame\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            av_frame_free(&sw_frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding\n");
            goto fail;
        }

        if (frame->format == hw_pix_fmt) {
            /* retrieve data from GPU to CPU */
            if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
                fprintf(stderr, "Error transferring the data to system memory\n");
                goto fail;
            }
            tmp_frame = sw_frame;
        } else
            tmp_frame = frame;

        size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
                                        tmp_frame->height, 1);
        buffer = av_malloc(size);
        if (!buffer) {
            fprintf(stderr, "Can not alloc buffer\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t * const *)tmp_frame->data,
                                      (const int *)tmp_frame->linesize, tmp_frame->format,
                                      tmp_frame->width, tmp_frame->height, 1);
        if (ret < 0) {
            fprintf(stderr, "Can not copy image to buffer\n");
            goto fail;
        }

        if ((ret = fwrite(buffer, 1, size, output_file)) < 0) {
            fprintf(stderr, "Failed to dump raw data.\n");
            goto fail;
        }

    fail:
        av_frame_free(&frame);
        av_frame_free(&sw_frame);
        av_freep(&buffer);
        if (ret < 0)
            return ret;
    }
}

int main(int argc, char *argv[])
{
    AVFormatContext *input_ctx = NULL;
    int video_stream, ret;
    AVStream *video = NULL;
    AVCodecContext *decoder_ctx = NULL;
    const AVCodec *decoder = NULL;
    AVPacket *packet = NULL;
    enum AVHWDeviceType type;
    int i;

    if (argc < 4) {
        fprintf(stderr, "Usage: %s <device type> <input file> <output file>\n", argv[0]);
        return -1;
    }

    type = av_hwdevice_find_type_by_name(argv[1]);
    if (type == AV_HWDEVICE_TYPE_NONE) {
        fprintf(stderr, "Device type %s is not supported.\n", argv[1]);
        fprintf(stderr, "Available device types:");
        while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
            fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
        fprintf(stderr, "\n");
        return -1;
    }

    packet = av_packet_alloc();
    if (!packet) {
        fprintf(stderr, "Failed to allocate AVPacket\n");
        return -1;
    }

    /* open the input file */
    if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
        fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
        return -1;
    }

    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
        fprintf(stderr, "Cannot find input stream information.\n");
        return -1;
    }

    /* find the video stream information */
    ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file\n");
        return -1;
    }
    video_stream = ret;

    for (i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
        if (!config) {
            fprintf(stderr, "Decoder %s does not support device type %s.\n",
                    decoder->name, av_hwdevice_get_type_name(type));
            return -1;
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
            config->device_type == type) {
            hw_pix_fmt = config->pix_fmt;
            break;
        }
    }

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = input_ctx->streams[video_stream];
    if (avcodec_parameters_to_context(decoder_ctx, video->codecpar) < 0)
        return -1;

    decoder_ctx->get_format = get_hw_format;

    if (hw_decoder_init(decoder_ctx, type) < 0)
        return -1;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
        fprintf(stderr, "Failed to open codec for stream #%u\n", video_stream);
        return -1;
    }

    /* open the file to dump raw data */
    output_file = fopen(argv[3], "w+b");

    /* actual decoding and dump the raw data */
    while (ret >= 0) {
        if ((ret = av_read_frame(input_ctx, packet)) < 0)
            break;

        if (video_stream == packet->stream_index)
            ret = decode_write(decoder_ctx, packet);

        av_packet_unref(packet);
    }

    /* flush the decoder */
    ret = decode_write(decoder_ctx, NULL);

    if (output_file)
        fclose(output_file);
    av_packet_free(&packet);
    avcodec_free_context(&decoder_ctx);
    avformat_close_input(&input_ctx);
    av_buffer_unref(&hw_device_ctx);

    return 0;
}
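
/*
 * Build sketch: the exact command depends on your toolchain and FFmpeg
 * installation, but a pkg-config based compile along these lines is typical:
 *
 *   gcc hw_decode.c -o hw_decode \
 *       $(pkg-config --cflags --libs libavformat libavcodec libavutil)
 */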