    /* From ff_ffv1_init_slice_state(): copy the per-plane settings from the
     * main context into the slice context. */
    fs->plane_count  = f->plane_count;
    fs->transparency = f->transparency;
    for (j = 0; j < f->plane_count; j++) {
        /* per-plane state allocation elided in this listing */
    }

    /* Build the range coder's state tables: the zero-bit table is the
     * mirror of the one-bit table taken from state_transition. */
    for (j = 1; j < 256; j++) {
        fs->c.one_state[j]        = f->state_transition[j];
        fs->c.zero_state[256 - j] = 256 - fs->c.one_state[j];
    }
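As an aside, a minimal standalone sketch of the same mirroring (not part of ffv1.c; the helper name is made up for illustration). The derived tables satisfy zero_state[256 - j] == 256 - one_state[j], i.e. the zero-bit table is the point reflection of the one-bit table around 128.

    #include <stdint.h>

    /* Hypothetical helper, illustration only: builds the two range-coder
     * state tables the same way the loop above does. */
    static void build_mirrored_states(const uint8_t transition[256],
                                      uint8_t one_state[256],
                                      uint8_t zero_state[256])
    {
        for (int j = 1; j < 256; j++) {
            one_state[j]        = transition[j];
            zero_state[256 - j] = 256 - one_state[j];
        }
    }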
    /* From ff_ffv1_init_slices_state(): initialize every slice context. */
    for (i = 0; i < f->max_slice_count; i++) {
        /* per-slice initialization elided in this listing */
    }
    /* From ff_ffv1_init_slice_contexts(): allocate one FFV1Context per slice
     * and derive each slice's position and size with integer division, so
     * the slices tile the frame exactly. */
    int i, max_slice_count = f->num_h_slices * f->num_v_slices;

    for (i = 0; i < max_slice_count;) {
        int sx  = i % f->num_h_slices;
        int sy  = i / f->num_h_slices;
        int sxs = f->avctx->width  * sx       / f->num_h_slices;
        int sxe = f->avctx->width  * (sx + 1) / f->num_h_slices;
        int sys = f->avctx->height * sy       / f->num_v_slices;
        int sye = f->avctx->height * (sy + 1) / f->num_v_slices;
        FFV1Context *fs = av_mallocz(sizeof(*fs)); /* allocation reconstructed;
                                                      only its uses appear in
                                                      this listing */

        f->slice_context[i++] = fs;
        memcpy(fs, f, sizeof(*fs));
        memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));

        fs->slice_width  = sxe - sxs;
        fs->slice_height = sye - sys;

        /* per-slice sample buffers; the leading size arguments of the
         * av_malloc_array() calls are truncated in this listing */
        fs->sample_buffer   = av_malloc_array(/* ... */, sizeof(*fs->sample_buffer));
        fs->sample_buffer32 = av_malloc_array(/* ... */, sizeof(*fs->sample_buffer32));
        if (!fs->sample_buffer || !fs->sample_buffer32) {
            /* allocation-failure handling elided in this listing */
        }
    }
    f->max_slice_count = max_slice_count; /* success path: all slices created */

    /* failure path: record only the i slices that were actually created,
     * so that cleanup frees no more than that */
    f->max_slice_count = i;
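A worked example of the slice geometry above, with assumed numbers rather than values from the source: because each boundary is computed as width * k / num_h_slices with integer division, consecutive slices always share a boundary, so the slices cover the frame exactly even when the dimensions are not divisible by the slice counts.

    #include <stdio.h>

    /* Standalone sketch (assumed 1921-wide frame, 3 horizontal slices; not
     * from ffv1.c): prints the bounds produced by the same arithmetic. */
    int main(void)
    {
        int width = 1921, num_h_slices = 3;   /* deliberately not divisible */
        for (int sx = 0; sx < num_h_slices; sx++) {
            int sxs = width * sx       / num_h_slices;
            int sxe = width * (sx + 1) / num_h_slices;
            printf("slice %d: x in [%d, %d)\n", sx, sxs, sxe);
        }
        return 0; /* prints [0,640) [640,1280) [1280,1921) */
    }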
    /* From ff_ffv1_allocate_initial_states(): one initial-state table per
     * quantization table, with every entry set to 128. */
    for (i = 0; i < f->quant_table_count; i++) {
        f->initial_states[i] = av_malloc_array(f->context_count[i],
                                               sizeof(*f->initial_states[i]));
        /* the first argument above is reconstructed; only the sizeof tail
         * appears in this listing */
        if (!f->initial_states[i])
            return AVERROR(ENOMEM); /* error return reconstructed; not shown
                                       in this listing */
        memset(f->initial_states[i], 128,
               f->context_count[i] * sizeof(*f->initial_states[i]));
    }
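For context, 128 is the midpoint of the 8-bit state range, i.e. the range coder's unbiased starting state, which is why the table is filled with 128 rather than zeroed. A minimal sketch of the same idiom with assumed types (not the real FFV1Context layout):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative helper, not FFmpeg API: allocates a per-context state
     * table and resets every byte to the neutral value 128. */
    static uint8_t *alloc_neutral_states(size_t context_count, size_t context_size)
    {
        uint8_t *states = malloc(context_count * context_size);
        if (states)
            memset(states, 128, context_count * context_size);
        return states;
    }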
    /* From ff_ffv1_clear_slice_state(): reset each plane's context state. */
    for (i = 0; i < f->plane_count; i++) {
        /* per-plane reset elided in this listing */
    }
    /* From ff_ffv1_close(): free the per-plane state of every slice, then
     * the per-quant-table statistics, then the slice contexts themselves. */
    for (j = 0; j < s->max_slice_count; j++) {
        for (i = 0; i < s->plane_count; i++) {
            /* per-plane frees elided in this listing */
        }
    }

    for (j = 0; j < s->quant_table_count; j++) {
        for (i = 0; i < s->max_slice_count; i++) {
            /* per-slice statistics frees elided in this listing */
        }
    }

    for (i = 0; i < s->max_slice_count; i++)
        ; /* free of each slice context elided in this listing */
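The individual frees performed by these loops are elided in the listing; the usual FFmpeg idiom is av_freep(), which takes the address of the pointer and sets it to NULL after freeing, so repeated cleanup is safe. A minimal sketch of that idiom (standalone, links against libavutil; not taken from ffv1.c):

    #include <stdint.h>
    #include <libavutil/mem.h>

    /* Minimal sketch of the av_freep() idiom used by cleanup code. */
    int main(void)
    {
        uint8_t *buf = av_mallocz(1024);
        av_freep(&buf); /* frees the block, buf is now NULL */
        av_freep(&buf); /* no-op on a NULL pointer, still safe */
        return 0;
    }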
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
uint8_t (*state)[CONTEXT_SIZE]
#define AC_RANGE_CUSTOM_TAB
av_cold int ff_ffv1_init_slices_state(FFV1Context *f)
int flags
AV_CODEC_FLAG_*.
void ff_ffv1_clear_slice_state(const FFV1Context *f, FFV1Context *fs)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
uint8_t interlace_bit_state[2]
#define fs(width, name, subs,...)
char * stats_out
pass1 encoding statistics output buffer
av_cold int ff_ffv1_close(AVCodecContext *avctx)
av_cold int ff_ffv1_init_slice_state(const FFV1Context *f, FFV1Context *fs)
#define i(width, name, range_min, range_max)
#define av_malloc_array(a, b)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
void * av_calloc(size_t nmemb, size_t size)
uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2]
int ff_ffv1_allocate_initial_states(FFV1Context *f)
AVCodecContext
main external API structure.
int width
picture width / height.
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.