if (memcmp(p->buf, "XVAG", 4) ||
    memcmp(p->buf+32, "fmat", 4))

unsigned offset, big_endian, codec;

.extensions = "xvag",
#define FF_SANE_NB_CHANNELS
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
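FF_SANE_NB_CHANNELS and AVERROR_INVALIDDATA are commonly paired when validating header fields. A minimal sketch, assuming a channel count has just been read from the file (the helper name check_channels is made up):

#include "libavutil/error.h"      /* AVERROR_INVALIDDATA */
#include "libavutil/internal.h"   /* FF_SANE_NB_CHANNELS */

/* Reject implausible channel counts as corrupt input. */
static int check_channels(unsigned channels)
{
    if (!channels || channels > FF_SANE_NB_CHANNELS)
        return AVERROR_INVALIDDATA;
    return 0;
}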
static int xvag_probe(const AVProbeData *p)
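A probe built from the fragments quoted at the top of this listing would look like the sketch below. Returning AVPROBE_SCORE_MAX on a match is an assumption, but it is the usual choice when two magic tags line up exactly:

#include <string.h>
#include "avformat.h"

/* Accept the input only when "XVAG" sits at offset 0 and the "fmat"
 * chunk tag at offset 32, exactly the checks shown above. */
static int xvag_probe(const AVProbeData *p)
{
    if (memcmp(p->buf,      "XVAG", 4) ||
        memcmp(p->buf + 32, "fmat", 4))
        return 0;

    return AVPROBE_SCORE_MAX;
}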
#define avpriv_request_sample(...)
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
int64_t avio_skip(AVIOContext *s, int64_t offset)
Skip given number of bytes forward.
unsigned int avio_rb16(AVIOContext *s)
struct AVCodecParameters
This struct describes the properties of an encoded stream.
unsigned int avio_rb32(AVIOContext *s)
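avio_skip(), avio_rb16() and avio_rb32() are what a read_header uses to walk the on-disk layout. A sketch with made-up field names and offsets (this is not the real XVAG chunk layout):

#include "avformat.h"   /* AVFormatContext, AVIOContext, avio_* readers */

/* Hypothetical header walk: read two big-endian fields, then seek
 * forward to where the audio data is said to start. */
static int read_chunk_fields(AVFormatContext *s, unsigned *data_offset)
{
    AVIOContext *pb = s->pb;
    unsigned version;

    avio_skip(pb, 4);               /* magic tag, already validated by the probe */
    *data_offset = avio_rb32(pb);   /* made-up field: start of the audio data */
    version      = avio_rb16(pb);   /* made-up field: container version */
    (void)version;

    /* jump from the current position to the start of the data */
    avio_skip(pb, *data_offset - avio_tell(pb));
    return 0;
}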
enum AVStreamParseType need_parsing
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
AVStream ** streams
A list of all streams in the file.
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
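avformat_new_stream() is how read_header adds the audio stream to s->streams. A sketch of the usual setup; the codec, time base and use of AVSTREAM_PARSE_FULL_RAW are illustrative, with the MP3 case matching the AV_CODEC_ID_MP3 entry further down:

#include "avformat.h"
#include "internal.h"   /* libavformat/internal.h: avpriv_set_pts_info() */

/* Create the single audio stream and fill in the basics. The sample
 * rate is a placeholder for a value parsed from the header. */
static int add_audio_stream(AVFormatContext *s, int sample_rate)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id    = AV_CODEC_ID_MP3;          /* e.g. an MPEG-audio chunk */
    st->codecpar->sample_rate = sample_rate;
    st->need_parsing          = AVSTREAM_PARSE_FULL_RAW;  /* let a parser split frames */
    avpriv_set_pts_info(st, 64, 1, sample_rate);          /* timestamps counted in samples */
    return 0;
}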
unsigned int avio_rl32(AVIOContext *s)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
enum AVMediaType codec_type
General type of the encoded data.
unsigned char * buf
Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero.
int block_align
Audio only.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
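avpriv_request_sample() and AVERROR_PATCHWELCOME usually travel together when the demuxer meets a variant it cannot handle yet. A sketch (the helper name and message text are made up):

#include "libavutil/error.h"      /* AVERROR_PATCHWELCOME */
#include "libavutil/internal.h"   /* avpriv_request_sample() */
#include "avformat.h"

/* Unknown codec tag: ask for a sample file and report "not implemented". */
static int reject_unknown_codec(AVFormatContext *s, unsigned codec)
{
    avpriv_request_sample(s, "codec 0x%08X", codec);
    return AVERROR_PATCHWELCOME;
}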
static int xvag_read_header(AVFormatContext *s)
AVIOContext * pb
I/O context.
static int xvag_read_packet(AVFormatContext *s, AVPacket *pkt)
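For a format like this the read_packet callback can stay very small: block_align (listed above) says how many bytes make up one packet, and av_get_packet() handles allocation and reading. A minimal sketch, assuming a single audio stream was set up in read_header:

#include "avformat.h"

/* Read one block_align-sized chunk per packet from stream 0. */
static int xvag_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecParameters *par = s->streams[0]->codecpar;

    return av_get_packet(s->pb, pkt, par->block_align);
}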
struct AVProbeData
This structure contains the data a format has to probe a file.
int64_t duration
Decoding: duration of the stream, in stream time base.
int sample_rate
Audio only.
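Once the header is parsed, the audio-only codecpar fields and the stream duration get filled in. A sketch with made-up parameter names; the duration is expressed in the sample-based time base from the stream setup above:

#include "avformat.h"

/* rate, block_bytes and nb_samples stand in for values parsed from the header. */
static void fill_audio_params(AVStream *st, int rate, int block_bytes,
                              int64_t nb_samples)
{
    st->codecpar->sample_rate = rate;
    st->codecpar->block_align = block_bytes;  /* bytes per coded audio frame */
    st->duration              = nb_samples;   /* in stream time base (samples) */
}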
Common internal API header.
AVInputFormat ff_xvag_demuxer
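The pieces above come together in the demuxer definition itself. A sketch of ff_xvag_demuxer; the long_name string is an assumption, and the callbacks are the ones declared in this listing:

#include "avformat.h"
#include "libavutil/internal.h"   /* NULL_IF_CONFIG_SMALL() */

/* Ties the probe/header/packet callbacks to the "xvag" extension quoted near the top. */
AVInputFormat ff_xvag_demuxer = {
    .name        = "xvag",
    .long_name   = NULL_IF_CONFIG_SMALL("Sony PS3 XVAG"),   /* assumed long name */
    .read_probe  = xvag_probe,
    .read_header = xvag_read_header,
    .read_packet = xvag_read_packet,
    .extensions  = "xvag",
};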
AVCodecParameters * codecpar
Codec parameters associated with this stream.
struct AVPacket
This structure stores compressed data.