79 unsigned int frame_marker;
80 unsigned int profile_low_bit, profile_high_bit, reserved_zero;
81 unsigned int error_resilient_mode;
89 if (frame_marker != 2) {
97 frame->profile = (profile_high_bit << 1) | profile_low_bit;
100 if (reserved_zero != 0) {
102 "unsupported profile or invalid bitstream.\n");
118 frame_sync_code = get_bits(&bc, 24);
119 if (frame_sync_code != 0x498342) {
132 if (error_resilient_mode == 0) {
137 frame_sync_code = get_bits(&bc, 24);
138 if (frame_sync_code != 0x498342) {
140 "%06x.\n", frame_sync_code);
144 unsigned int color_space;
150 if (color_space != 7) {
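The excerpt above (source lines 79-150) reads the start of the VP9 uncompressed frame header: the 2-bit frame marker, the split profile bits, the reserved bit that must be zero for profile 3, and the 0x498342 sync code. Below is a minimal sketch of that parsing, assuming the GetBitContext helpers listed in the reference section further down (init_get_bits, get_bits, get_bits1); the helper name parse_header_start and its standalone form are illustrative, not the filter's actual code.

/*
 * Hedged sketch (not the filter's own function): reading the first fields of
 * a VP9 uncompressed header with the libavcodec GetBitContext API.
 */
#include "libavutil/error.h"
#include "get_bits.h"

static int parse_header_start(const uint8_t *data, int size,
                              unsigned int *profile,
                              unsigned int *show_existing_frame)
{
    GetBitContext bc;
    unsigned int frame_marker, profile_low_bit, profile_high_bit;
    int err;

    err = init_get_bits(&bc, data, 8 * size);
    if (err < 0)
        return err;

    frame_marker = get_bits(&bc, 2);          /* always 2 for VP9 */
    if (frame_marker != 2)
        return AVERROR_INVALIDDATA;

    profile_low_bit  = get_bits1(&bc);
    profile_high_bit = get_bits1(&bc);
    *profile = (profile_high_bit << 1) | profile_low_bit;
    if (*profile == 3 && get_bits1(&bc) != 0)  /* reserved_zero must be 0 */
        return AVERROR_INVALIDDATA;

    *show_existing_frame = get_bits1(&bc);
    return 0;
}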
177 *next_display = last_frame, *frame;
181 frame = ctx->slot[s];
184 if (frame->needs_output && (!next_output ||
187 if (frame->needs_display && (!next_display ||
188 frame->pts < next_display->pts))
189 next_display = frame;
192 if (!next_output && !next_display)
195 if (!next_display || (next_output &&
196 next_output->sequence < next_display->sequence))
199 frame = next_display;
201 if (frame->needs_output && frame->needs_display &&
202 next_output == next_display) {
204 "%"PRId64" (%"PRId64") in order.\n",
205 frame->sequence, frame->pts);
209 frame->needs_output = frame->needs_display = 0;
210 } else if (frame->needs_output) {
211 if (frame->needs_display) {
213 "(%"PRId64") for later display.\n",
214 frame->sequence, frame->pts);
217 "%"PRId64" (%"PRId64") to keep order.\n",
218 frame->sequence, frame->pts);
224 frame->needs_output = 0;
228 av_assert0(!frame->needs_output && frame->needs_display);
230 if (frame->slots == 0) {
232 "which is no longer available?\n");
233 frame->needs_display = 0;
241 "(%"PRId64") from slot %d.\n",
242 frame->sequence, frame->pts, s);
253 put_bits(&pb, 1, frame->profile & 1);
255 put_bits(&pb, 1, (frame->profile >> 1) & 1);
256 if (frame->profile == 3) {
269 out->pts = out->dts = frame->pts;
271 frame->needs_display = 0;
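The put_bits() calls excerpted at source lines 253-271 rebuild a tiny replacement packet whose header only says "show the frame already stored in slot s". A minimal sketch of that construction follows, assuming the PutBitContext API from the reference list below; the function name and the fixed 2-byte buffer are illustrative, not the filter's exact code.

#include "put_bits.h"

/* Hypothetical helper: write a minimal VP9 "show existing frame" header. */
static void write_show_existing_frame(uint8_t buf[2], unsigned int profile, int slot)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, 2);

    put_bits(&pb, 2, 2);                     /* frame_marker */
    put_bits(&pb, 1, profile & 1);           /* profile_low_bit */
    put_bits(&pb, 1, (profile >> 1) & 1);    /* profile_high_bit */
    if (profile == 3)
        put_bits(&pb, 1, 0);                 /* reserved_zero */
    put_bits(&pb, 1, 1);                     /* show_existing_frame */
    put_bits(&pb, 3, slot);                  /* frame_to_show_map_idx */

    while (put_bits_count(&pb) < 16)         /* pad to whole bytes */
        put_bits(&pb, 1, 0);
    flush_put_bits(&pb);
}

The resulting packet then carries the displayed frame's timestamp in both pts and dts, as source line 269 above shows.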
295 if ((in->data[in->size - 1] & 0xe0) == 0xc0) {
314 "frame: %d.\n", err);
323 "(%"PRId64"): show %u.\n", frame->sequence,
327 "(%"PRId64"): type %u show %u refresh %02x.\n",
346 "output overwriting slot %d: %d.\n",
369 "for transient frame.\n");
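The test at source line 295 above inspects the last byte of the input packet: when a VP9 superframe index is appended, that byte is a marker whose top three bits are 110, so masking with 0xe0 yields 0xc0. A hedged sketch of the same check as a standalone helper (the name is hypothetical):

/* Does this packet end in a VP9 superframe index?  In the marker byte,
 * bits 7..5 are 110, bits 4..3 give the size-field length minus one and
 * bits 2..0 the frame count minus one. */
static int has_superframe_index(const uint8_t *data, int size)
{
    return size > 0 && (data[size - 1] & 0xe0) == 0xc0;
}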
412 .name = "vp9_raw_reorder",
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void put_bits(PutBitContext *s, int n, unsigned int value)
Write up to 31 bits into a bitstream.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Memory handling functions.
The bitstream filter state.
unsigned int frame_to_show
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
void * priv_data
Opaque filter-specific private data.
static int vp9_raw_reorder_filter(AVBSFContext *bsf, AVPacket *out)
static void vp9_raw_reorder_close(AVBSFContext *bsf)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int vp9_raw_reorder_frame_parse(AVBSFContext *bsf, VP9RawReorderFrame *frame)
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
const AVBitStreamFilter ff_vp9_raw_reorder_bsf
static void vp9_raw_reorder_flush(AVBSFContext *bsf)
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
static int FUNC() frame_sync_code(CodedBitstreamContext *ctx, RWContext *rw, VP9RawFrameHeader *current)
#define AVERROR_EOF
End of file.
bitstream reader API header.
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
AVCodecID
Identify the syntax and semantics of the bitstream.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
simple assert() macros that are a bit more flexible than ISO C assert().
VP9RawReorderFrame * next_frame
static int put_bits_count(PutBitContext *s)
unsigned int refresh_frame_flags
static unsigned int get_bits1(GetBitContext *s)
static void vp9_raw_reorder_clear_slot(VP9RawReorderContext *ctx, int s)
static void skip_bits(GetBitContext *s, int n)
VP9RawReorderFrame * slot[FRAME_SLOTS]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static enum AVCodecID codec_ids[]
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
Called by the bitstream filters to get the next packet for filtering.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static enum AVCodecID vp9_raw_reorder_codec_ids[]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed. Can be AV_NOPTS_VALUE if it is not stored in the file.
static void vp9_raw_reorder_frame_free(VP9RawReorderFrame **frame)
static int vp9_raw_reorder_make_output(AVBSFContext *bsf, AVPacket *out, VP9RawReorderFrame *last_frame)
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user. Can be AV_NOPTS_VALUE if it is not stored in the file.
#define AV_NOPTS_VALUE
Undefined timestamp value.
unsigned int show_existing_frame
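Putting the pieces above together: ff_vp9_raw_reorder_bsf is selected by the name string in the listing ("vp9_raw_reorder"), either with the -bsf:v option of the ffmpeg tool or through the public bitstream-filter API. The following is a minimal sketch of the latter, assuming the standard av_bsf_* calls from libavcodec (the bsf.h header location is for recent FFmpeg; older releases declare these in avcodec.h). Error handling is trimmed and the local names are illustrative.

#include <libavutil/error.h>
#include <libavcodec/bsf.h>

/* Setup, once: look the filter up by name and initialize it. */
static AVBSFContext *open_vp9_raw_reorder(const AVCodecParameters *par)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("vp9_raw_reorder");
    AVBSFContext *bsf = NULL;

    if (!f || av_bsf_alloc(f, &bsf) < 0)
        return NULL;
    avcodec_parameters_copy(bsf->par_in, par);   /* copy the stream parameters in */
    if (av_bsf_init(bsf) < 0) {
        av_bsf_free(&bsf);
        return NULL;
    }
    return bsf;
}

/* Feed one packet through the filter and drain whatever it produces. */
static int reorder_packet(AVBSFContext *bsf, AVPacket *in, AVPacket *out)
{
    int err = av_bsf_send_packet(bsf, in);       /* pass NULL/empty packet to flush */
    if (err < 0)
        return err;

    while ((err = av_bsf_receive_packet(bsf, out)) >= 0) {
        /* ... consume out, e.g. hand it to the muxer ... */
        av_packet_unref(out);
    }
    /* AVERROR(EAGAIN) means "send more input"; AVERROR_EOF means fully drained. */
    return (err == AVERROR(EAGAIN) || err == AVERROR_EOF) ? 0 : err;
}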