Excerpts from libavcodec/vc1dec.c, FFmpeg's VC-1/WMV3 decoder (including the WMV3IMAGE/VC1IMAGE sprite image decoders); code elided from the listing is marked with "...".
#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

typedef struct SpriteData {
    ...
    int coefs[2][7];                /* per-sprite transform coefficients, 16.16 fixed point */
    int effect_type, effect_flag;
    int effect_pcount1, effect_pcount2;
    int effect_params1[15], effect_params2[10];
} SpriteData;
static void vc1_sprite_parse_transform(GetBitContext *gb, int c[7])
{
    ...
    /* depending on the coded transform type, a different subset of the
     * seven 16.16 fixed-point coefficients is read from the bitstream */
        c[2] = get_fp_val(gb);
    ...
        c[0] = c[4] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
    ...
        c[0] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
    ...
        c[0] = get_fp_val(gb);
        c[1] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[3] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
    ...
    c[5] = get_fp_val(gb);
    ...
        c[6] = get_fp_val(gb);
    ...
}
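get_fp_val() delivers the coefficients in 16.16 fixed point, which is why the logging code further down divides by 1 << 16 for the integer part and scales the low 16 bits by 1000 for the fractional part. A minimal, self-contained sketch of that formatting convention (print_fp16 and the sample values are illustrative, not from the file):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: print a 16.16 fixed-point value as "integer.milli",
     * mirroring the expressions used in the coefficient logging below. */
    static void print_fp16(int v)
    {
        printf("%d.%.3d", v / (1 << 16), (abs(v) & 0xFFFF) * 1000 / (1 << 16));
    }

    int main(void)
    {
        print_fp16(3 << 16);            /* prints 3.000 */
        printf("\n");
        print_fp16((1 << 16) + 0x8000); /* prints 1.500 */
        printf("\n");
        return 0;
    }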
/* from vc1_parse_sprites(): */
    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            ...
        for (i = 0; i < 7; i++)
            ...    /* log each coefficient as integer.fraction */
                   sd->coefs[sprite][i] / (1 << 16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        ...
    }
    ...
    switch (sd->effect_pcount1 = get_bits(gb, 4)) {
    ...
        vc1_sprite_parse_transform(gb, sd->effect_params1);
    ...
        vc1_sprite_parse_transform(gb, sd->effect_params1);
        vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
    ...
        for (i = 0; i < sd->effect_pcount1; i++)
            sd->effect_params1[i] = get_fp_val(gb);
    }
    if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
        ...
        for (i = 0; i < sd->effect_pcount1; i++)
            ...
                   sd->effect_params1[i] / (1 << 16),
                   (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
        ...
    }
    sd->effect_pcount2 = get_bits(gb, 16);
    if (sd->effect_pcount2 > 10) {
        ...
    } else if (sd->effect_pcount2) {
        ...
        while (++i < sd->effect_pcount2) {
            sd->effect_params2[i] = get_fp_val(gb);
            ...
                   sd->effect_params2[i] / (1 << 16),
                   (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
        }
        ...
    }
static void vc1_draw_sprites(VC1Context *v, SpriteData *sd)
{
    int i, plane, row, sprite;
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    ...
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    ...
        xadv[i] = sd->coefs[i][0];
    ...
    alpha = av_clip_uint16(sd->coefs[1][6]);

    for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        ...
            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f->data[plane];
                int      iline  = s->current_picture.f->linesize[plane];
                int ycoord = yoff[sprite] + yadv[sprite] * row;
                int yline  = ycoord >> 16;
                ...
                ysub[sprite] = ycoord & 0xFFFF;
                ...
                    iplane = s->last_picture.f->data[plane];
                    iline  = s->last_picture.f->linesize[plane];
                ...
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                    ...
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                ...
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                            ...
                        }
                        ...
                        sr_cache[sprite][0] = yline;
                    }
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        ...    /* rescale the next source row into the line cache */
                                          iplane + next_line, xoff[sprite],
                                          xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    }
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
                }
            }
            ...
                memcpy(dst, src_h[0][0], width);
            ...
            if (ysub[0] && ysub[1]) {
                ...    /* blend both sprites, each with vertical interpolation */
                                  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
            } else if (ysub[0]) {
                ...
            } else if (ysub[1]) {
                ...
            }
        ...
    }
}
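ysub[sprite] keeps the fractional low 16 bits of the vertical source position, and the sprite_v_* routines blend the cached row at yline with the one at yline + 1 by that fraction (optionally mixing the two sprites with alpha). A rough scalar sketch of such a vertical blend, assuming plain linear interpolation; blend_rows_16_16 is illustrative and not one of the actual VC1DSPContext entry points:

    #include <stdint.h>

    /* Illustrative only: blend two source rows with a 16.16 fractional offset,
     * roughly what a sprite_v_single-style routine does for one output row. */
    static void blend_rows_16_16(uint8_t *dst, const uint8_t *row0,
                                 const uint8_t *row1, int ysub, int width)
    {
        for (int x = 0; x < width; x++)
            dst[x] = (row0[x] * ((1 << 16) - ysub) + row1[x] * ysub + (1 << 15)) >> 16;
    }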
/* from vc1_decode_sprites(): */
    memset(&sd, 0, sizeof(sd));

    ret = vc1_parse_sprites(v, gb, &sd);
    ...
    if (!s->current_picture.f || !s->current_picture.f->data[0]) {
        ...
    }
    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
        ...
    }
    ...
    vc1_draw_sprites(v, &sd);
/* from vc1_sprite_flush(): */
    for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++)
        ...
            memset(f->data[plane] + i * f->linesize[plane],
                   plane ? 128 : 0, f->linesize[plane]);    /* luma to 0, chroma to neutral 128 */
/* from ff_vc1_decode_init_alloc_tables(): */
    int mb_height = FFALIGN(s->mb_height, 2);
    ...
    v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) +
                                   s->mb_stride * (mb_height + 1) * 2));
    ...
    v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) +
                               s->mb_stride * (mb_height + 1) * 2);
    ...
    v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) +
                                         s->mb_stride * (mb_height + 1) * 2);
    ...
    for (i = 0; i < 4; i++)
        ...
    ...    /* trailing arguments of the ff_intrax8_common_init() call */
                               s->block, s->block_last_index,
                               s->mb_width, s->mb_height);
    ...

/* from ff_vc1_init_transposed_scantables(): */
    for (i = 0; i < 64; i++) {
#define transpose(x) (((x) >> 3) | (((x) & 7) << 3))
        ...
    }
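transpose(x) swaps the row (x >> 3) and column (x & 7) of a 0..63 scan position, which is how the transposed zigzag tables are derived from the regular ones. A small self-contained sketch using the same macro (transpose_scantable and the identity table are illustrative, not the actual ff_* scan tables):

    #include <stdint.h>
    #include <stdio.h>

    #define transpose(x) (((x) >> 3) | (((x) & 7) << 3))

    /* Illustrative only: derive a transposed scan order from an existing one. */
    static void transpose_scantable(uint8_t dst[64], const uint8_t src[64])
    {
        for (int i = 0; i < 64; i++)
            dst[i] = transpose(src[i]);
    }

    int main(void)
    {
        uint8_t identity[64], flipped[64];
        for (int i = 0; i < 64; i++)
            identity[i] = i;
        transpose_scantable(flipped, identity);
        printf("%d %d\n", flipped[1], flipped[8]); /* 8 1: row and column swapped */
        return 0;
    }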
/* from vc1_decode_init(), parsing the codec extradata: */
    } else if (count < 0) {
        ...
    }
    ...
    int seq_initialized = 0, ep_initialized = 0;
    ...
    for (; next < end; start = next) {
        ...
        size = next - start - 4;
        ...
    }
    ...
    if (!seq_initialized || !ep_initialized) {
        ...
    }
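The extradata of an Advanced Profile stream is a sequence of chunks delimited by 00 00 01 xx start codes, and the loop above requires that both a sequence header and an entry-point header were found in it. A minimal hedged sketch of such a start-code scan; list_vc1_chunks is illustrative, while the decoder itself walks the buffer with find_next_marker() and dispatches on the start-code value (0x0F sequence header, 0x0E entry point, per SMPTE 421M):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: walk 00 00 01 xx delimited chunks in an extradata
     * buffer and report the start-code suffix of each chunk. */
    static void list_vc1_chunks(const uint8_t *buf, int size)
    {
        for (int i = 0; i + 3 < size; i++) {
            if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1) {
                printf("chunk with start code 0x%02X at offset %d\n",
                       (unsigned)buf[i + 3], i);
                i += 3;
            }
        }
    }

    int main(void)
    {
        static const uint8_t extradata[] = {
            0x00, 0x00, 0x01, 0x0F, 0xAA, 0xBB,   /* sequence header chunk (dummy payload) */
            0x00, 0x00, 0x01, 0x0E, 0xCC,         /* entry-point chunk (dummy payload)     */
        };
        list_vc1_chunks(extradata, sizeof(extradata));
        return 0;
    }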
    ...
    for (i = 0; i < 4; i++)
        ...
/* from vc1_decode_frame(): */
    int buf_size = avpkt->size, n_slices = 0, i, ret;
    ...
    const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
    int mb_height, n_slices1 = -1;
    ...
    /* return the delayed picture, if any */
    if (s->low_delay == 0 && s->next_picture_ptr) {
        ...
        s->next_picture_ptr = NULL;
        ...
    }
    /* scan the packet for start codes and build the slice table */
    for (start = buf, end = buf + buf_size; next < end; start = next) {
        ...
        size = next - start - 4;
        if (size <= 0)
            continue;
        ...
            /* second field of a field-coded picture */
            buf_start_second_field = start;
            ...
            if (!slices[n_slices].buf) {
                ...
            }
            ...                        slices[n_slices].buf);
            ...
            slices[n_slices].mby_start = avctx->coded_height + 31 >> 5;
            slices[n_slices].rawbuf    = start;
            slices[n_slices].raw_size  = size + 4;
            n_slices1 = n_slices - 1;    /* index of the last slice of the first field */
            ...
            /* slice chunk */
            if (!slices[n_slices].buf) {
                ...
            }
            ...                        slices[n_slices].buf);
            ...
            slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
            slices[n_slices].rawbuf    = start;
            slices[n_slices].raw_size  = size + 4;
            ...
    } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) {
        /* second field without its own start code: split at the divider */
        ...
        buf_start_second_field = divider;
        ...
        if (!slices[n_slices].buf) {
            ...
        }
        buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4,
                                        slices[n_slices].buf);
        ...
        slices[n_slices].mby_start = s->mb_height + 1 >> 1;
        slices[n_slices].rawbuf    = divider;
        slices[n_slices].raw_size  = buf + buf_size - divider;
        n_slices1 = n_slices - 1;
        ...
    }
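Every header, field or slice chunk found in the packet is appended to a growable slices[] table recording its unescaped bit reader, its raw location in the packet and the macroblock row it starts at; the decode loops further down replay those entries. A compact sketch of that kind of bookkeeping in plain C (SliceInfo and append_slice are illustrative stand-ins; the real table also stores a GetBitContext and is grown with av_realloc_array()/av_mallocz()):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative only: per-chunk bookkeeping similar in spirit to the
     * decoder's slice table (bit reader omitted for brevity). */
    typedef struct SliceInfo {
        const uint8_t *rawbuf;  /* points into the packet              */
        int raw_size;           /* chunk size including the start code */
        int mby_start;          /* first macroblock row of the slice   */
    } SliceInfo;

    static SliceInfo *append_slice(SliceInfo *tab, int *count,
                                   const uint8_t *rawbuf, int raw_size, int mby_start)
    {
        SliceInfo *tmp = realloc(tab, (*count + 1) * sizeof(*tab));
        if (!tmp)
            return tab;          /* caller keeps the old table on failure */
        tmp[*count].rawbuf    = rawbuf;
        tmp[*count].raw_size  = raw_size;
        tmp[*count].mby_start = mby_start;
        (*count)++;
        return tmp;
    }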
    if (s->context_initialized &&
        ...
    ...
    if (!s->context_initialized) {
        ...
    }
    ...
    s->current_picture.f->pict_type = s->pict_type;
    ...
    if (s->next_p_frame_damaged) {
        ...
        s->next_p_frame_damaged = 0;
    }
    ...
    /* repeat_pict from the repeat-field / RPTFRM syntax elements */
        s->current_picture_ptr->f->repeat_pict = 0;
    ...
        s->current_picture_ptr->f->repeat_pict = 1;
    ...
        s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
    ...
    s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
    s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
    ...
    if (v->field_mode && buf_start_second_field) {
        ...
        if (n_slices1 == -1) {
            ...
        }
        ...
        /* first field: slices 0 .. n_slices1 */
        for (i = 0; i < n_slices1 + 1; i++) {
            s->gb   = slices[i].gb;
            s->mb_y = slices[i].mby_start;
            ...
        }
        ...
        /* second field starts at slices[n_slices1 + 1] */
        s->gb   = slices[n_slices1 + 1].gb;
        s->mb_y = slices[n_slices1 + 1].mby_start;
        ...
        /* hand the second field to the hwaccel */
        if ((ret = avctx->hwaccel->start_frame(avctx, buf_start_second_field,
                                               (buf + buf_size) - buf_start_second_field)) < 0)
            goto err;
        ...
        if (n_slices - n_slices1 == 2) {
            /* the second field carries no extra slice chunks: submit it whole */
            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start_second_field,
                                                    (buf + buf_size) - buf_start_second_field)) < 0)
                goto err;
        } else {
            ...
            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start_second_field,
                                                    slices[n_slices1 + 2].rawbuf - buf_start_second_field)) < 0)
                goto err;
            ...
            for (i = n_slices1 + 2; i < n_slices; i++) {
                s->gb   = slices[i].gb;
                s->mb_y = slices[i].mby_start;
                ...
            }
        }
        ...
        if (n_slices == 0) {
            ...
        } else {
            ...
            for (i = 0; i < n_slices; i++) {
                s->gb   = slices[i].gb;
                s->mb_y = slices[i].mby_start;
                ...
            }
        }
    ...
        /* field pictures: decode with doubled line sizes (restored below) */
        s->current_picture.f->linesize[0] <<= 1;
        s->current_picture.f->linesize[1] <<= 1;
        s->current_picture.f->linesize[2] <<= 1;
        ...
        s->uvlinesize <<= 1;
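When the two fields of a field-coded picture are decoded separately, the line sizes are doubled so that consecutive rows of the field land on every second row of the frame buffer, then halved again once both fields are done (visible a bit further down). A tiny sketch of the idea on a plain buffer (fill_field is illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: write one field of an interleaved frame by doubling
     * the stride; `bottom` selects the odd rows. */
    static void fill_field(uint8_t *frame, int linesize, int width, int height,
                           int bottom, uint8_t value)
    {
        uint8_t *field   = frame + (bottom ? linesize : 0);
        int field_stride = linesize * 2;        /* skip the other field's rows */
        for (int y = 0; y < height / 2; y++)
            memset(field + y * field_stride, value, width);
    }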
        /* slice loop */
        for (i = 0; i <= n_slices; i++) {
            if (i > 0 && slices[i - 1].mby_start >= mb_height) {
                ...        "picture boundary (%d >= %d)\n", i,
                           slices[i - 1].mby_start, mb_height);
                ...
            }
            ...
            v->mb_off = s->mb_stride * s->mb_height >> 1;
            ...
            s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i - 1].mby_start % mb_height);
            ...
            s->end_mb_y = (i == n_slices) ? mb_height
                                          : FFMIN(mb_height, slices[i].mby_start % mb_height);
            ...
            if (i >= n_slices) {
                ...
            }
            ...
            s->end_mb_y = (i == n_slices1 + 1) ? mb_height
                                               : FFMIN(mb_height, slices[i].mby_start % mb_height);
            ...
            if (s->end_mb_y <= s->start_mb_y) {
                ...
            }
            ...
            if (i != n_slices) {
                s->gb = slices[i].gb;
                ...
            }
        }
        ...
        /* restore the line sizes doubled above */
        s->current_picture.f->linesize[0] >>= 1;
        s->current_picture.f->linesize[1] >>= 1;
        s->current_picture.f->linesize[2] >>= 1;
        ...
        s->uvlinesize >>= 1;
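Each pass of the loop decodes the macroblock rows [start_mb_y, end_mb_y) of one slice; the bounds come from the neighbouring slice-table entries, taken modulo mb_height so that second-field slices map back into the field's own row range, and clamped with FFMAX/FFMIN. A small sketch of that boundary computation (slice_row_range is an illustrative helper, not the decoder's loop; FFMAX/FFMIN are redefined here only to keep the sketch self-contained):

    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))
    #define FFMIN(a, b) ((a) > (b) ? (b) : (a))

    /* Illustrative only: row range [start, end) covered by slice i,
     * mirroring the boundary computation in the loop above. */
    static void slice_row_range(const int *mby_start, int n_slices, int mb_height,
                                int i, int *start, int *end)
    {
        *start = (i == 0)        ? 0         : FFMAX(0, mby_start[i - 1] % mb_height);
        *end   = (i == n_slices) ? mb_height : FFMIN(mb_height, mby_start[i] % mb_height);
    }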
    ff_dlog(s->avctx, "Consumed %i/%i bits\n",
            ...);
    ...
#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
    if ((ret = vc1_decode_sprites(v, &s->gb)) < 0)
        goto err;
#endif
    ...
    } else if (s->last_picture_ptr) {
        ...
    }
    ...
    /* both the normal exit and the error path free the slice table */
    for (i = 0; i < n_slices; i++)
        ...
    ...
    for (i = 0; i < n_slices; i++)
        ...
/* per-hwaccel pixel formats offered to ff_get_format() (entries and matching #endif lines elided): */
#if CONFIG_VC1_DXVA2_HWACCEL
#if CONFIG_VC1_D3D11VA_HWACCEL
#if CONFIG_VC1_NVDEC_HWACCEL
#if CONFIG_VC1_VAAPI_HWACCEL
#if CONFIG_VC1_VDPAU_HWACCEL
    ...

/* hwaccel configurations (hw_configs) of the VC-1 decoder (entries elided): */
#if CONFIG_VC1_DXVA2_HWACCEL
#if CONFIG_VC1_D3D11VA_HWACCEL
#if CONFIG_VC1_D3D11VA2_HWACCEL
#if CONFIG_VC1_NVDEC_HWACCEL
#if CONFIG_VC1_VAAPI_HWACCEL
#if CONFIG_VC1_VDPAU_HWACCEL
    ...

#if CONFIG_WMV3_DECODER
/* the WMV3 decoder repeats the same list under its own CONFIG_WMV3_* guards: */
#if CONFIG_WMV3_DXVA2_HWACCEL
#if CONFIG_WMV3_D3D11VA_HWACCEL
#if CONFIG_WMV3_D3D11VA2_HWACCEL
#if CONFIG_WMV3_NVDEC_HWACCEL
#if CONFIG_WMV3_VAAPI_HWACCEL
#if CONFIG_WMV3_VDPAU_HWACCEL
    ...
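The CONFIG_VC1_*_HWACCEL guards above contribute entries to the pixel-format list offered to ff_get_format() and to the decoders' hw_configs tables. A hedged sketch of the shape of such a pixel-format list, showing only two of the guards; the array name is a placeholder (the decoder's own table is vc1_hwaccel_pixfmt_list_420[]), and the CONFIG_* macros come from FFmpeg's generated config.h:

    #include <libavutil/pixfmt.h>

    /* Illustrative only: a hwaccel-guarded pixel format list, terminated by
     * AV_PIX_FMT_NONE, as expected by the get_format() negotiation. */
    static const enum AVPixelFormat vc1_pix_fmts_example[] = {
    #if CONFIG_VC1_DXVA2_HWACCEL
        AV_PIX_FMT_DXVA2_VLD,
    #endif
    #if CONFIG_VC1_VAAPI_HWACCEL
        AV_PIX_FMT_VAAPI,
    #endif
        AV_PIX_FMT_YUV420P,   /* software fallback */
        AV_PIX_FMT_NONE
    };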
#if CONFIG_WMV3IMAGE_DECODER
AVCodec ff_wmv3image_decoder = {
    .name  = "wmv3image",
    ...
    .flush = vc1_sprite_flush,
    ...
};
#endif

#if CONFIG_VC1IMAGE_DECODER
AVCodec ff_vc1image_decoder = {
    .name  = "vc1image",
    ...
    .flush = vc1_sprite_flush,
    ...
};
#endif