 36  #define MAKE_WRITABLE(table) \
 39      (ret = av_buffer_make_writable(&pic->table)) < 0)\
 50      for (i = 0; i < 2; i++) {
 61  # define EMU_EDGE_HEIGHT (4 * 70)
 97                                int chroma_x_shift, int chroma_y_shift,
 98                                int linesize, int uvlinesize)
121      if (r < 0 || !pic->f->buf[0]) {
129      for (i = 0; pic->f->data[i]; i++) {
144          av_log(avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
151      if ((linesize && linesize != pic->f->linesize[0]) ||
152          (uvlinesize && uvlinesize != pic->f->linesize[1])) {
154             "get_buffer() failed (stride changed: linesize=%d/%d uvlinesize=%d/%d)\n",
164             "get_buffer() failed (uv stride mismatch)\n");
173             "get_buffer() failed to allocate context scratch buffers.\n");
182                                  int mb_stride, int mb_width, int mb_height, int b8_stride)
184      const int big_mb_num = mb_stride * (mb_height + 1) + 1;
185      const int mb_array_size = mb_stride * mb_height;
186      const int b8_array_size = b8_stride * mb_height * 2;
205      if (out_format == FMT_H263 || encoding ||
210          int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
211          int ref_index_size = 4 * mb_array_size;
213          for (i = 0; mv_size && i < 2; i++) {
234                       int chroma_x_shift, int chroma_y_shift, int out_format,
235                       int mb_stride, int mb_width, int mb_height, int b8_stride,
236                       ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
251                              chroma_x_shift, chroma_y_shift,
252                              *linesize, *uvlinesize) < 0)
261                              mb_stride, mb_width, mb_height, b8_stride);
278      for (i = 0; i < 2; i++) {
314      memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
327      for (i = 0; i < 2; i++) {
343      for (i = 0; i < 2; i++) {
413          if (!picture[i].f->buf[0])
424                 "Internal error, picture buffer overflow\n");
445          if (picture[ret].needs_realloc) {
468      for (i = 0; i < 2; i++) {
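The MAKE_WRITABLE macro excerpted at lines 36-39 wraps av_buffer_make_writable() from libavutil. A minimal stand-alone sketch of the same copy-on-write check (the function form, its name and the reduced error handling are illustrative only, not part of this file):

#include <libavutil/buffer.h>

/* If the table buffer exists and is shared, reallocate it as a private,
 * writable copy; otherwise leave it alone. */
static int make_table_writable(AVBufferRef **table)
{
    int ret;

    if (*table && (ret = av_buffer_make_writable(table)) < 0)
        return ret;    /* a copy was needed but allocation failed */
    return 0;          /* buffer (if any) is now safe to modify in place */
}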
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
const struct AVCodec * codec
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
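Since av_buffer_unref() (here) together with av_buffer_ref() and av_buffer_allocz() (further down this list) defines the reference counting used for all the Picture tables, a small hypothetical example of that lifecycle (names invented for the sketch):

#include <libavutil/buffer.h>

/* Illustrative only: data allocated by av_buffer_allocz() stays alive
 * until the last reference to it is unreferenced. */
static int buffer_refcount_demo(void)
{
    AVBufferRef *a = av_buffer_allocz(1024);   /* refcount 1, zero-filled */
    AVBufferRef *b;

    if (!a)
        return -1;
    b = av_buffer_ref(a);     /* refcount 2: a and b share the same data */
    av_buffer_unref(&a);      /* refcount 1: data still reachable through b */
    av_buffer_unref(&b);      /* refcount 0: buffer is freed, b set to NULL */
    return 0;
}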
int av_codec_is_encoder(const AVCodec *codec)
uint8_t * mb_mean
Table for MB luminance.
uint8_t * edge_emu_buffer
temporary buffer used when MVs point to out-of-frame data
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
uint16_t * mb_var
Table for MB variances.
static int pic_is_unused(Picture *pic)
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
int field_picture
whether or not the picture was encoded in separate fields
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static int alloc_frame_buffer(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int chroma_x_shift, int chroma_y_shift, int linesize, int uvlinesize)
Allocate a frame buffer.
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
int alloc_mb_width
mb_width used to allocate tables
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame...
#define AV_CODEC_EXPORT_DATA_MVS
Export motion vectors through frame side data.
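On this page the flag only decides whether the motion value tables get allocated (see the condition at line 205); on the API side, a hypothetical consumer reads the exported vectors back from frame side data. AV_FRAME_DATA_MOTION_VECTORS and AVMotionVector are public libavutil types that are not listed on this page, so treat this as a sketch of that public interface:

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/motion_vector.h>

/* Reads motion vectors from a frame decoded by a codec that was opened with
 * avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS set beforehand. */
static void print_motion_vectors(const AVFrame *frame)
{
    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
    if (!sd)
        return;                                 /* no MVs exported on this frame */

    const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
    size_t nb_mvs = sd->size / sizeof(*mvs);
    for (size_t i = 0; i < nb_mvs; i++)
        printf("mv %zu: src (%d,%d) -> dst (%d,%d)\n",
               i, mvs[i].src_x, mvs[i].src_y, mvs[i].dst_x, mvs[i].dst_y);
}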
static int make_tables_writable(Picture *pic)
MotionEstContext
Motion estimation context.
void ff_free_picture_tables(Picture *pic)
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
AVBufferRef * mb_type_buf
AVBufferRef * mb_mean_buf
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int alloc_picture_tables(AVCodecContext *avctx, Picture *pic, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride)
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
#define MAX_PICTURE_COUNT
simple assert() macros that are a bit more flexible than ISO C assert().
int64_t max_pixels
The number of pixels per image to maximally accept.
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel, and the contents of buffers must not be read before progress on them has been reported. If the codec calls ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames; the frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded; a good place to put this is where draw_horiz_band() is called (add this if it isn't called anywhere).
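As a rough sketch of that protocol, assuming the internal libavcodec/thread.h helpers (ff_thread_get_buffer(), ff_thread_report_progress(), ff_thread_release_buffer()) keep the signatures current at the time of writing (they may differ between FFmpeg versions), a frame-threaded decoder's buffer handling might look like this:

#include <limits.h>
#include "avcodec.h"
#include "thread.h"   /* internal header: ThreadFrame, ff_thread_* helpers */

/* Illustrative only: allocate a frame through the threading layer, report
 * progress as rows become available, then report completion. The buffer is
 * released later with ff_thread_release_buffer() when the picture is recycled. */
static int decode_one_frame(AVCodecContext *avctx, ThreadFrame *tf)
{
    int ret = ff_thread_get_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    /* ... decode the top half of the picture into tf->f ... */
    ff_thread_report_progress(tf, tf->f->height / 2, 0);

    /* ... decode the remaining rows ... */
    ff_thread_report_progress(tf, INT_MAX, 0);  /* picture fully decoded */
    return 0;
}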
AVBufferRef * hwaccel_priv_buf
AVBufferRef * motion_val_buf[2]
int width
picture width / height.
int16_t (*motion_val[2])[2]
void * hwaccel_picture_private
Hardware accelerator private data.
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames...
uint16_t * mc_mb_var
Table for motion compensated MB variances.
AVBufferRef * qscale_table_buf
int alloc_mb_height
mb_height used to allocate tables
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
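Because avcodec_default_get_buffer2() is the library fallback, a custom get_buffer2 callback can simply delegate to it. A hypothetical example (install it with avctx->get_buffer2 = my_get_buffer2 before avcodec_open2(); the name and the logging are illustrative, not part of this file):

#include <libavcodec/avcodec.h>
#include <libavutil/log.h>

/* Log each buffer request, then fall back to the default allocator. */
static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
{
    av_log(s, AV_LOG_DEBUG, "buffer request: %dx%d, format %d, flags 0x%x\n",
           frame->width, frame->height, frame->format, flags);
    return avcodec_default_get_buffer2(s, frame, flags);
}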
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
uint8_t * data
The data buffer.
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
#define MAKE_WRITABLE(table)
common internal and external API header
AVBufferRef * mbskip_table_buf
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
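A hypothetical validation helper showing how av_image_check_size2() is typically invoked with the AVCodecContext fields listed on this page (max_pixels, pix_fmt); the wrapper name is made up:

#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>

/* Reject dimensions whose plane sizes would overflow or whose pixel count
 * exceeds the user-configured AVCodecContext.max_pixels limit. */
static int check_dimensions(AVCodecContext *avctx, unsigned int w, unsigned int h)
{
    return av_image_check_size2(w, h, avctx->max_pixels,
                                avctx->pix_fmt, 0, avctx);
}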
int ff_update_picture_tables(Picture *dst, Picture *src)
uint8_t * obmc_scratchpad
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
AVBufferRef * mc_mb_var_buf
uint32_t * mb_type
types and macros are defined in mpegutils.h
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
int debug_mv
debug motion vectors
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats (for video that means pixel format; for audio that means channel layout and sample format). These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
int alloc_mb_stride
mb_stride used to allocate tables
int64_t mb_var_sum
sum of MB variance for current frame
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
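For illustration, av_buffer_replace(&dst, src) is roughly the one-call equivalent of the manual unref-then-ref pattern sketched here (the helper name is invented for the sketch):

#include <libavutil/buffer.h>
#include <libavutil/error.h>

/* Roughly what av_buffer_replace(dst, src) does for the caller: drop the old
 * reference in *dst and make it point at src's data (or leave it NULL when
 * src is NULL). */
static int share_buffer(AVBufferRef **dst, AVBufferRef *src)
{
    av_buffer_unref(dst);
    if (!src)
        return 0;
    *dst = av_buffer_ref(src);
    return *dst ? 0 : AVERROR(ENOMEM);
}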
AVBufferRef * ref_index_buf[2]