#define UNCHECKED_BITSTREAM_READER 1

#include "config_components.h"

#define SPRITE_TRAJ_VLC_BITS 6
#define MB_TYPE_B_VLC_BITS   4
#define STUDIO_INTRA_BITS    9
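/*
 * The fragment below belongs to the single-warp-point GMC path (gmc1_motion):
 * the whole macroblock is fetched at one constant sprite offset and filtered
 * with mdsp.gmc1(); emulated_edge_mc() pads the source when it leaves the
 * picture area.
 */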
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t *const *ref_picture)
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;

    motion_x   = ctx->sprite_offset[0][0];
    motion_y   = ctx->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (ctx->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (ctx->sprite_warping_accuracy + 1));
    motion_x  *= 1 << (3 - ctx->sprite_warping_accuracy);
    motion_y  *= 1 << (3 - ctx->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
    src_y      = av_clip(src_y, -16, s->height);
    if (src_y == s->height)

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;

    if ((motion_x | motion_y) & 7) {
        ctx->mdsp.gmc1(dest_y, ptr, linesize, 16,
                       motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        ctx->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                       motion_x & 15, motion_y & 15, 128 - s->no_rounding);

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);

    motion_x   = ctx->sprite_offset[1][0];
    motion_y   = ctx->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (ctx->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (ctx->sprite_warping_accuracy + 1));
    motion_x  *= 1 << (3 - ctx->sprite_warping_accuracy);
    motion_y  *= 1 << (3 - ctx->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
    src_y      = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    ctx->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                   motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    ctx->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                   motion_x & 15, motion_y & 15, 128 - s->no_rounding);
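/*
 * General GMC path (gmc_motion): luma and chroma are warped with the full
 * affine transform through mdsp.gmc(), using the per-frame sprite_offset,
 * sprite_delta and sprite_warping_accuracy values computed from the sprite
 * trajectory.
 */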
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t *const *ref_picture)
    int linesize, uvlinesize;
    const int a = ctx->sprite_warping_accuracy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = ctx->sprite_offset[0][0] + ctx->sprite_delta[0][0] * s->mb_x * 16 +
         ctx->sprite_delta[0][1] * s->mb_y * 16;
    oy = ctx->sprite_offset[0][1] + ctx->sprite_delta[1][0] * s->mb_x * 16 +
         ctx->sprite_delta[1][1] * s->mb_y * 16;

    ctx->mdsp.gmc(dest_y, ptr, linesize, 16,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  s->h_edge_pos, s->v_edge_pos);
    ctx->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                  ox + ctx->sprite_delta[0][0] * 8,
                  oy + ctx->sprite_delta[1][0] * 8,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  s->h_edge_pos, s->v_edge_pos);

    ox = ctx->sprite_offset[1][0] + ctx->sprite_delta[0][0] * s->mb_x * 8 +
         ctx->sprite_delta[0][1] * s->mb_y * 8;
    oy = ctx->sprite_offset[1][1] + ctx->sprite_delta[1][0] * s->mb_x * 8 +
         ctx->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    ctx->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

    ptr = ref_picture[2];
    ctx->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                  ctx->sprite_delta[0][0], ctx->sprite_delta[0][1],
                  ctx->sprite_delta[1][0], ctx->sprite_delta[1][1],
                  a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                  (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);

                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t *const *ref_picture)
    if (ctx->real_sprite_warping_points == 1) {
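/*
 * Studio-profile block output: for dpcm_direction == 0 the twelve 32-bit
 * coefficient blocks are written out with idct_put(); otherwise the DPCM
 * macroblock samples are copied directly into the 16-bit destination planes,
 * top-down or bottom-up depending on the DPCM direction.
 */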
                            uint8_t *dest_cr, int block_size, int uvlinesize,
                            int dct_linesize, int dct_offset)
    const int act_block_size = block_size * 2;

    if (ctx->dpcm_direction == 0) {
        s->idsp.idct_put(dest_y,                               dct_linesize, (int16_t*)ctx->block32[0]);
        s->idsp.idct_put(dest_y + act_block_size,              dct_linesize, (int16_t*)ctx->block32[1]);
        s->idsp.idct_put(dest_y + dct_offset,                  dct_linesize, (int16_t*)ctx->block32[2]);
        s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)ctx->block32[3]);

        dct_linesize = uvlinesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

        s->idsp.idct_put(dest_cb,              dct_linesize, (int16_t*)ctx->block32[4]);
        s->idsp.idct_put(dest_cr,              dct_linesize, (int16_t*)ctx->block32[5]);
        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)ctx->block32[6]);
        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)ctx->block32[7]);
        if (!s->chroma_x_shift){
            s->idsp.idct_put(dest_cb + act_block_size,              dct_linesize, (int16_t*)ctx->block32[8]);
            s->idsp.idct_put(dest_cr + act_block_size,              dct_linesize, (int16_t*)ctx->block32[9]);
            s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)ctx->block32[10]);
            s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)ctx->block32[11]);
    } else if (ctx->dpcm_direction == 1) {
        uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
        int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
        for (int i = 0; i < 3; i++) {
            const uint16_t *src = ctx->dpcm_macroblock[i];
            int vsub = i ? s->chroma_y_shift : 0;
            int hsub = i ? s->chroma_x_shift : 0;
            for (int h = 0; h < (16 >> (vsub + lowres)); h++){
                    dest_pcm[i][w] = src[idx];
                dest_pcm[i] += linesize[i] / 2;
        uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
        int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};

        for (int i = 0; i < 3; i++) {
            const uint16_t *src = ctx->dpcm_macroblock[i];
            int vsub = i ? s->chroma_y_shift : 0;
            int hsub = i ? s->chroma_x_shift : 0;
            dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub + lowres) - 1);
            for (int h = (16 >> (vsub + lowres)) - 1; h >= 0; h--){
                    dest_pcm[i][w] = src[idx];
                dest_pcm[i] -= linesize[i] / 2;
    int16_t *ac_val, *ac_val1;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;
        const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
        if (s->mb_x == 0 || s->qscale == qscale_table[xy] ||
            for (i = 1; i < 8; i++)
                block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
            for (i = 1; i < 8; i++)
                block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
        const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
        ac_val -= 16 * s->block_wrap[n];
        if (s->mb_y == 0 || s->qscale == qscale_table[xy] ||
            for (i = 1; i < 8; i++)
                block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
            for (i = 1; i < 8; i++)
    for (i = 1; i < 8; i++)
        ac_val1[i]     = block[s->idsp.idct_permutation[i << 3]];
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
            (v >> (8 - s->pict_type) != 1) || s->partitioned_frame)
        bits_count += 8 + s->pict_type;
    if (bits_count + 8 >= s->gb.size_in_bits) {
        v |= 0x7F >> (7 - (bits_count & 7));
        static const uint16_t mpeg4_resync_prefix[8] = {
            0x7F00, 0x7E00, 0x7C00, 0x7800, 0x7000, 0x6000, 0x4000, 0x0000
        if (v == mpeg4_resync_prefix[bits_count & 7]) {
            int mb_num_bits = av_log2(s->mb_num - 1) + 1;
            if (!mb_num || mb_num > s->mb_num || get_bits_count(&s->gb)+6 > s->gb.size_in_bits)
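/*
 * A video-packet resync marker is a run of zero bits followed by a single
 * '1'; its length depends on the VOP coding type (and f_code), which is why
 * the prefix table above is indexed by the current bit position within the
 * byte.
 */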
    int a     = 2 << ctx->sprite_warping_accuracy;
    int rho   = 3 - ctx->sprite_warping_accuracy;
    int min_ab, i, w2, h2, w3, h3;
    int sprite_ref[4][2];
    int virtual_ref[2][2];
    const int vop_ref[4][2] = { { 0, 0 },         { s->width, 0 },
                                { 0, s->height }, { s->width, s->height } };
    int d[4][2]             = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };

    if (w <= 0 || h <= 0)

    for (i = 0; i < ctx->num_sprite_warping_points; i++) {
            if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
        ctx->sprite_traj[i][0] = d[i][0] = x;
        ctx->sprite_traj[i][1] = d[i][1] = y;
        ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;

    while ((1 << beta) < h)

    if (ctx->divx_version == 500 && ctx->divx_build == 413) {
        sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
        sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
        sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
        sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
        sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
        sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
        sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
        sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
        sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
        sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
        sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
        sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);

    virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
                            (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) +
                            w2 * (r * sprite_ref[1][0] - 16LL * vop_ref[1][0])), w);
    virtual_ref[0][1] = 16 * vop_ref[0][1] +
                            (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) +
                            w2 * (r * sprite_ref[1][1] - 16LL * vop_ref[1][1])), w);
    virtual_ref[1][0] = 16 * vop_ref[0][0] +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) +
                                     h2 * (r * sprite_ref[2][0] - 16LL * vop_ref[2][0])), h);
    virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) +
                                     h2 * (r * sprite_ref[2][1] - 16LL * vop_ref[2][1])), h);
    switch (ctx->num_sprite_warping_points) {
        sprite_offset[0][0]  =
        sprite_offset[0][1]  =
        sprite_offset[1][0]  =
        sprite_offset[1][1]  = 0;
        sprite_delta[0][0]   = a;
        sprite_delta[1][0]   = 0;
        sprite_delta[1][1]   = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        sprite_offset[0][0]  = sprite_ref[0][0] - a * vop_ref[0][0];
        sprite_offset[0][1]  = sprite_ref[0][1] - a * vop_ref[0][1];
        sprite_offset[1][0]  = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
                               a * (vop_ref[0][0] / 2);
        sprite_offset[1][1]  = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
                               a * (vop_ref[0][1] / 2);
        sprite_delta[0][0]   = a;
        sprite_delta[1][0]   = 0;
        sprite_delta[1][1]   = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        sprite_offset[0][0] = ((int64_t)      sprite_ref[0][0] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                              ((int64_t)  r * sprite_ref[0][1] - virtual_ref[0][1]) *
        sprite_offset[0][1] = ((int64_t)      sprite_ref[0][1] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) *
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
        sprite_offset[1][0] = (((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t)-2 * vop_ref[0][0] + 1) +
                               ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) *
                               ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
        sprite_offset[1][1] = (((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) *
                               ((int64_t)-2 * vop_ref[0][0] + 1) +
                               ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
        sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
        sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
        sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
        sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);

        ctx->sprite_shift[1] = alpha + rho + 2;
        sprite_offset[0][0] = ((int64_t)sprite_ref[0][0] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-vop_ref[0][1]) +
        sprite_offset[0][1] = ((int64_t)sprite_ref[0][1] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-vop_ref[0][1]) +
        sprite_offset[1][0] = ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t)2 * w2 * h3 * r * sprite_ref[0][0] - 16 * w2 * h3 +
        sprite_offset[1][1] = ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t)2 * w2 * h3 * r * sprite_ref[0][1] - 16 * w2 * h3 +
        sprite_delta[0][0] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[0][0]) * h3;
        sprite_delta[0][1] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[1][0]) * w3;
        sprite_delta[1][0] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[0][1]) * h3;
        sprite_delta[1][1] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[1][1]) * w3;

        ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
        ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;

    if (sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
        sprite_delta[0][1] == 0 &&
        sprite_delta[1][0] == 0 &&
        sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
        sprite_offset[0][0] >>= ctx->sprite_shift[0];
        sprite_offset[0][1] >>= ctx->sprite_shift[0];
        sprite_offset[1][0] >>= ctx->sprite_shift[1];
        sprite_offset[1][1] >>= ctx->sprite_shift[1];
        sprite_delta[0][0] = a;
        sprite_delta[0][1] = 0;
        sprite_delta[1][0] = 0;
        sprite_delta[1][1] = a;
        ctx->sprite_shift[0] = 0;
        ctx->sprite_shift[1] = 0;
        ctx->real_sprite_warping_points = 1;
        int shift_y = 16 - ctx->sprite_shift[0];
        int shift_c = 16 - ctx->sprite_shift[1];

        for (i = 0; i < 2; i++) {
            if (shift_c < 0 || shift_y < 0 ||
                FFABS(sprite_offset[0][i]) >= INT_MAX >> shift_y ||
                FFABS(sprite_offset[1][i]) >= INT_MAX >> shift_c ||
                FFABS(sprite_delta[0][i])  >= INT_MAX >> shift_y ||
                FFABS(sprite_delta[1][i])  >= INT_MAX >> shift_y

        for (i = 0; i < 2; i++) {
            sprite_offset[0][i] *= 1 << shift_y;
            sprite_offset[1][i] *= 1 << shift_c;
            sprite_delta[0][i]  *= 1 << shift_y;
            sprite_delta[1][i]  *= 1 << shift_y;
            ctx->sprite_shift[i] = 16;

        for (i = 0; i < 2; i++) {
                sprite_delta[i][0] - a * (1LL<<16),
                sprite_delta[i][1] - a * (1LL<<16)
            if (llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL) + sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sd[0]) >= INT_MAX ||
                llabs(sd[1]) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL) + sd[1] * (h+16LL)) >= INT_MAX

    ctx->real_sprite_warping_points = ctx->num_sprite_warping_points;

    for (i = 0; i < 4; i++) {
        ctx->sprite_offset[i&1][i>>1] = sprite_offset[i&1][i>>1];
        ctx->sprite_delta [i&1][i>>1] = sprite_delta [i&1][i>>1];

    memset(ctx->sprite_offset, 0, sizeof(ctx->sprite_offset));
    memset(ctx->sprite_delta,  0, sizeof(ctx->sprite_delta));
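/*
 * The values stored above are later evaluated per pixel as
 *   vx = sprite_offset[0][0] + sprite_delta[0][0] * x + sprite_delta[0][1] * y
 *   vy = sprite_offset[0][1] + sprite_delta[1][0] * x + sprite_delta[1][1] * y
 * (right-shifted by sprite_shift) when the GMC prediction is built, so the
 * clamping and overflow checks above keep those products inside int range.
 */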
    int mb_num_bits      = av_log2(s->mb_num - 1) + 1;
    int header_extension = 0, mb_num, len;

    if (mb_num >= s->mb_num || !mb_num) {
               "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);

    s->mb_x = mb_num % s->mb_width;
    s->mb_y = mb_num / s->mb_width;

        int qscale = get_bits(&s->gb, s->quant_precision);
            s->chroma_qscale = s->qscale = qscale;

    if (header_extension) {
        check_marker(s->avctx, &s->gb, "before time_increment in video packed header");
        check_marker(s->avctx, &s->gb, "before vop_coding_type in video packed header");
                   "Error, video packet header damaged (f_code=0)\n");
                   "Error, video packet header damaged (b_code=0)\n");

    s->last_dc[2] = 1 << (s->avctx->bits_per_raw_sample + s->dct_precision + s->intra_dc_precision - 1);

    vlc_len = av_log2(s->mb_width * s->mb_height) + 1;
    if (mb_num >= s->mb_num)
    s->mb_x = mb_num % s->mb_width;
    s->mb_y = mb_num / s->mb_width;
    int x, y, mb_v, sum, dx, dy, shift;
    int len     = 1 << (s->f_code + 4);
    const int a = ctx->sprite_warping_accuracy;

    len >>= s->quarter_sample;

    if (ctx->real_sprite_warping_points == 1) {
        if (ctx->divx_version == 500 && ctx->divx_build == 413 && a >= s->quarter_sample)
            sum = ctx->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
            sum = RSHIFT(ctx->sprite_offset[0][n] * (1 << s->quarter_sample), a);
        dx = ctx->sprite_delta[n][0];
        dy = ctx->sprite_delta[n][1];
            dy -= 1 << (shift + a + 1);
            dx -= 1 << (shift + a + 1);
        mb_v = ctx->sprite_offset[0][n] + dx * s->mb_x * 16U + dy * s->mb_y * 16U;

        for (y = 0; y < 16; y++) {
            v = mb_v + (unsigned)dy * y;
            for (x = 0; x < 16; x++) {
        sum = RSHIFT(sum, a + 8 - s->quarter_sample);
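/*
 * get_amv() averages the per-pixel GMC displacement over the 16x16
 * macroblock; the result is then clamped to the motion-vector range implied
 * by f_code before being used as the average motion vector of a GMC MB.
 */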
    if (code < 0 || code > 9 ) {

    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };

    s->first_slice_line = 1;
    for (; s->mb_y < s->mb_height; s->mb_y++) {
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride;
                                s->avctx->lowres, s->chroma_x_shift);
            if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
                s->first_slice_line = 0;
                           "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
                s->cbp_table[xy] = cbpc & 3;
                s->current_picture.qscale_table[xy] = s->qscale;
                s->mbintra_table[xy] = 1;
                for (i = 0; i < 6; i++) {
                               "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
                s->pred_dir_table[xy] = dir;
                int mx, my, pred_x, pred_y, bits;
                int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
                const int stride       = s->b8_stride * 2;
                if (bits & 0x10000) {
                        mot_val[2 + stride] = mx;
                        mot_val[3 + stride] = my;
                    if (s->mbintra_table[xy])
                               "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
                s->cbp_table[xy] = cbpc & (8 + 3);
                s->mb_intra      = ((cbpc & 4) != 0);
                    s->mbintra_table[xy] = 1;
                    if (s->mbintra_table[xy])
                    if ((cbpc & 16) == 0) {
                            mot_val[2 + stride] = mx;
                            mot_val[3 + stride] = my;
                        for (i = 0; i < 4; i++) {
    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };

    s->mb_x             = s->resync_mb_x;
    s->first_slice_line = 1;
    for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) {
        for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride;
                                s->avctx->lowres, s->chroma_x_shift);
            if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
                s->first_slice_line = 0;
                               "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                    s->cbp_table[xy] |= cbpy << 2;
            if (IS_INTRA(s->current_picture.mb_type[xy])) {
                               "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                if (s->cbp_table[xy] & 8)
                s->current_picture.qscale_table[xy] = s->qscale;

                for (i = 0; i < 6; i++) {
                                   "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
                s->cbp_table[xy] &= 3;
                s->cbp_table[xy] |= cbpy << 2;
                s->pred_dir_table[xy] = dir;
            } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
                s->current_picture.qscale_table[xy] = s->qscale;
                s->cbp_table[xy]                    = 0;
                               "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                if (s->cbp_table[xy] & 8)
                s->current_picture.qscale_table[xy] = s->qscale;
                s->cbp_table[xy] &= 3;
                s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
            if (mb_num >= mb_count)
                        s->mb_x, s->mb_y, part_a_error);

    if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) {
                        s->mb_x, s->mb_y, part_a_error);

    s->mb_num_left = mb_num;
                   "marker missing after first I partition at %d %d\n",
                   "marker missing after first P partition at %d %d\n",
                    s->mb_x - 1, s->mb_y, part_a_end);
                                     int n, int coded, int intra,
                                     int use_intra_dc_vlc, int rvlc)
    const uint8_t *scan_table;

    if (use_intra_dc_vlc) {
        if (s->partitioned_frame) {
            level = s->dc_val[0][s->block_index[n]];
            dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32;
            if (dc_pred_dir == 0)
                scan_table = s->permutated_intra_v_scantable;
                scan_table = s->permutated_intra_h_scantable;
            scan_table = s->intra_scantable.permutated;
            s->block_last_index[n] = i;
        scan_table = s->intra_scantable.permutated;
        if (s->mpeg_quant) {
            qmul = s->qscale << 1;
            qadd = (s->qscale - 1) | 1;
                               "1. marker bit missing in rvlc esc\n");
                               "2. marker bit missing in rvlc esc\n");
                    cache ^= 0xC0000000;
                    if (cache & 0x80000000) {
                        if (cache & 0x40000000) {
                                   "1. marker bit missing in 3. esc\n");
                                   "2. marker bit missing in 3. esc\n");
                        if (s->error_recognition >= FF_ER_COMPLIANT) {
                            const int run1 = run - rl->max_run[last][abs_level] - 1;
                            if (abs_level <= rl->max_level[last][run]) {
                            if (s->error_recognition > FF_ER_COMPLIANT) {
                                if (abs_level <= rl->max_level[last][run]*2) {
                                if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) {
                        if ((unsigned)(level + 2048) > 4095) {
                                       "|level| overflow in 3. esc, qp=%d\n",
            ff_tlog(s->avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i>62);
                       "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
    if (!use_intra_dc_vlc) {
    s->block_last_index[n] = i;
    int cbp, mb_type, use_intra_dc_vlc;
    const int xy = s->mb_x + s->mb_y * s->mb_stride;

    mb_type = s->current_picture.mb_type[xy];
    cbp     = s->cbp_table[xy];

    use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;

    if (s->current_picture.qscale_table[xy] != s->qscale)

            for (i = 0; i < 4; i++) {
                s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
                s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];

            for (i = 0; i < 6; i++)
                s->block_last_index[i] = -1;
    } else if (s->mb_intra) {
        s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
    } else if (!s->mb_intra) {
        s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);

        s->bdsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
                                   use_intra_dc_vlc, ctx->rvlc) < 0) {
                       "texture corrupted at %d %d %d\n",
                       s->mb_x, s->mb_y, s->mb_intra);

    if (--s->mb_num_left <= 0) {
        const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
        if (s->cbp_table[xy + delta])
    int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
    const int xy = s->mb_x + s->mb_y * s->mb_stride;

            for (i = 0; i < 6; i++)
                s->block_last_index[i] = -1;
                           "mcbpc damaged at %d %d\n", s->mb_x, s->mb_y);
        } while (cbpc == 20);

        s->bdsp.clear_blocks(s->block[0]);
        s->mb_intra = ((cbpc & 4) != 0);
                       "P cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
        cbp = (cbpc & 3) | (cbpy << 2);
        if ((!s->progressive_sequence) &&
        if ((cbpc & 16) == 0) {
                s->mv[0][0][0] = mx;
                s->mv[0][0][1] = my;
            } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
                for (i = 0; i < 2; i++) {
                    s->mv[0][i][0] = mx;
                    s->mv[0][i][1] = my;
                s->mv[0][0][0] = mx;
                s->mv[0][0][1] = my;
            for (i = 0; i < 4; i++) {
                s->mv[0][i][0] = mx;
                s->mv[0][i][1] = my;

        for (i = 0; i < 2; i++) {
            s->last_mv[i][0][0] =
            s->last_mv[i][0][1] =
            s->last_mv[i][1][0] =
            s->last_mv[i][1][1] = 0;

        s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x];

        if (s->mb_skipped) {
            for (i = 0; i < 6; i++)
                s->block_last_index[i] = -1;

        s->bdsp.clear_blocks(s->block[0]);
        if (!s->progressive_sequence) {
                s->last_mv[0][1][0] =
                s->last_mv[0][0][0] =
                s->mv[0][0][0]      = mx;
                s->last_mv[0][1][1] =
                s->last_mv[0][0][1] =
                s->mv[0][0][1]      = my;
                s->last_mv[1][1][0] =
                s->last_mv[1][0][0] =
                s->mv[1][0][0]      = mx;
                s->last_mv[1][1][1] =
                s->last_mv[1][0][1] =
                s->mv[1][0][1]      = my;
                for (i = 0; i < 2; i++) {
                    s->last_mv[0][i][0] =
                    s->mv[0][i][0]      = mx;
                    s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
                for (i = 0; i < 2; i++) {
                    s->last_mv[1][i][0] =
                    s->mv[1][i][0]      = mx;
                    s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2;

        s->current_picture.mb_type[xy] = mb_type;
        int use_intra_dc_vlc;
                           "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
        } while (cbpc == 8);
                       "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
        cbp = (cbpc & 3) | (cbpy << 2);

        use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;

        if (!s->progressive_sequence)

        s->bdsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
                                   1, use_intra_dc_vlc, 0) < 0)
    for (i = 0; i < 6; i++) {

        if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
        } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next)
            const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
                   (s->mb_x + delta >= s->mb_width)
                   ? FFMIN(s->mb_y + 1, s->mb_height - 1)
            if (s->next_picture.mbskip_table[xy + delta])
    int cc, dct_dc_size, dct_diff, code, j, idx = 1, group = 0, run = 0,
        additional_code_len, sign, mismatch;
    uint8_t *const scantable = s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int min = -1 *  (1 << (s->avctx->bits_per_raw_sample + 6));
    const int max =      ((1 << (s->avctx->bits_per_raw_sample + 6)) - 1);
    int shift = 3 - s->dct_precision;

        quant_matrix = s->intra_matrix;
        quant_matrix = s->chroma_intra_matrix;
    if (dct_dc_size == 0) {
        if (dct_dc_size > 8) {
    s->last_dc[cc] += dct_diff;

        block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision);
        block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision) * (8 >> s->dct_precision);

    mismatch ^= block[0];
        } else if (group >= 1 && group <= 6) {
            run = 1 << additional_code_len;
            if (additional_code_len)
        } else if (group >= 7 && group <= 12) {
            run = (1 << (additional_code_len - 1)) + code;
            j = scantable[idx++];
            block[j] = sign ? 1 : -1;
        } else if (group >= 13 && group <= 20) {
            j = scantable[idx++];
        } else if (group == 21) {
            j = scantable[idx++];
            additional_code_len = s->avctx->bits_per_raw_sample + s->dct_precision + 4;
            flc = get_bits(&s->gb, additional_code_len);
            if (flc >> (additional_code_len-1))
                block[j] = -1 * (( flc ^ ((1 << additional_code_len) -1)) + 1);
        mismatch ^= block[j];
    block[63] ^= mismatch & 1;
    int i, j, w, h, idx = 0;
    int block_mean, rice_parameter, rice_prefix_code, rice_suffix_code,
        dpcm_residual, left, top, topleft, min_left_top, max_left_top, p, p2, output;
    h = 16 >> (n ? s->chroma_y_shift : 0);
    w = 16 >> (n ? s->chroma_x_shift : 0);

    block_mean = get_bits(&s->gb, s->avctx->bits_per_raw_sample);
    if (block_mean == 0){
    s->last_dc[n] = block_mean * (1 << (s->dct_precision + s->intra_dc_precision));

    if (rice_parameter == 0) {
    if (rice_parameter == 15)
    if (rice_parameter > 11) {

    for (i = 0; i < h; i++) {
        output = 1 << (s->avctx->bits_per_raw_sample - 1);
        top    = 1 << (s->avctx->bits_per_raw_sample - 1);

        for (j = 0; j < w; j++) {
            if (rice_prefix_code == 11)
                dpcm_residual = get_bits(&s->gb, s->avctx->bits_per_raw_sample);
                if (rice_prefix_code == 12) {
                rice_suffix_code = get_bitsz(&s->gb, rice_parameter);
                dpcm_residual    = (rice_prefix_code << rice_parameter) + rice_suffix_code;

            if (dpcm_residual & 1)
                dpcm_residual = (-1 * dpcm_residual) >> 1;
                dpcm_residual = (dpcm_residual >> 1);
                top = macroblock[idx-w];

            p = left + top - topleft;
            if (p < min_left_top)
            if (p > max_left_top)

            p2 = (FFMIN(min_left_top, topleft) + FFMAX(max_left_top, topleft)) >> 1;
                dpcm_residual *= -1;

            macroblock[idx++] = output = (dpcm_residual + p) & ((1 << s->avctx->bits_per_raw_sample) - 1);
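/*
 * The Rice-coded residual uses an interleaved signed mapping: even codes map
 * to non-negative residuals and odd codes to negative ones, which is what
 * the (dpcm_residual & 1) branch above undoes before adding the gradient
 * predictor p.
 */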
    ctx->dpcm_direction = 0;
    for (i = 0; i < 3; i++) {

    int hours, minutes, seconds;
    s->time_base = seconds + 60*(minutes + 60*hours);
    int visual_object_type;
    int is_visual_object_identifier = get_bits1(gb);

    if (is_visual_object_identifier) {
    visual_object_type = get_bits(gb, 4);
        if (video_signal_type) {
            int video_range, color_description;
            if (color_description) {
                s->avctx->color_primaries = get_bits(gb, 8);

    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        s->intra_matrix[j]        = v;
        s->chroma_intra_matrix[j] = v;
        s->inter_matrix[j]        = v;
        s->chroma_inter_matrix[j] = v;

        for (i = 0; i < 64; i++) {
            s->intra_matrix[j]        = v;
            s->chroma_intra_matrix[j] = v;
        for (i = 0; i < 64; i++) {
        for (i = 0; i < 64; i++) {
            s->chroma_intra_matrix[j] = v;
        for (i = 0; i < 64; i++) {
    uint8_t extension_type;

    int bits_per_raw_sample;
    int rgb, chroma_format;

    bits_per_raw_sample = get_bits(gb, 4);
    if (bits_per_raw_sample == 10) {
    if (rgb != ctx->rgb || s->chroma_format != chroma_format)
        s->context_reinit = 1;
    s->avctx->bits_per_raw_sample = bits_per_raw_sample;
    s->chroma_format = chroma_format;

    check_marker(s->avctx, gb, "before video_object_layer_width");
    check_marker(s->avctx, gb, "before video_object_layer_height");
    check_marker(s->avctx, gb, "after video_object_layer_height");

    if (s->width && s->height &&
        s->context_reinit = 1;

    aspect_ratio_info = get_bits(gb, 4);
        s->avctx->sample_aspect_ratio.num = get_bits(gb, 8);
        s->avctx->sample_aspect_ratio.den = get_bits(gb, 8);
            check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
            check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
            check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");

    s->studio_profile = 1;
    } else if (s->studio_profile) {

    aspect_ratio_info = get_bits(gb, 4);
        s->avctx->sample_aspect_ratio.num = get_bits(gb, 8);
        s->avctx->sample_aspect_ratio.den = get_bits(gb, 8);
            int chroma_format = get_bits(gb, 2);
                check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
                check_marker(s->avctx, gb, "after first_half_vbv_occupancy");
                check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");
        if (s->picture_number == 0) {
            switch (ctx->vo_type) {

    check_marker(s->avctx, gb, "before time_increment_resolution");

    s->avctx->framerate.num = get_bits(gb, 16);
    if (!s->avctx->framerate.num) {

    ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1;
    if (ctx->time_increment_bits < 1)
        ctx->time_increment_bits = 1;

        s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
        s->avctx->framerate.den = 1;

        !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
        if (s->width && s->height &&
            s->context_reinit = 1;

        s->progressive_sequence =
        s->interlaced_dct = 0;
               "MPEG-4 OBMC not supported (very likely buggy encoder)\n");
            if (ctx->num_sprite_warping_points > 3) {
                       "%d sprite_warping_points\n",
                       ctx->num_sprite_warping_points);
                ctx->num_sprite_warping_points = 0;

        if (s->quant_precision != 5)
                   "quant precision %d\n", s->quant_precision);
        if (s->quant_precision < 3 || s->quant_precision > 9) {
            s->quant_precision = 5;
        s->quant_precision = 5;

            for (i = 0; i < 64; i++) {
                s->intra_matrix[j]        = last;
                s->chroma_intra_matrix[j] = last;
            for (; i < 64; i++) {
                s->intra_matrix[j]        = last;
                s->chroma_intra_matrix[j] = last;

            for (i = 0; i < 64; i++) {
                s->inter_matrix[j]        = v;
                s->chroma_inter_matrix[j] = v;
            for (; i < 64; i++) {
                s->inter_matrix[j]        = last;
                s->chroma_inter_matrix[j] = last;

            s->quarter_sample = 0;

            int estimation_method = get_bits(gb, 2);
            if (estimation_method < 2) {
                if (!check_marker(s->avctx, gb, "in complexity estimation part 1")) {
                if (!check_marker(s->avctx, gb, "in complexity estimation part 2")) {
                if (estimation_method == 1) {
                       "Invalid Complexity estimation method %d\n",
                ctx->cplx_estimation_trash_i =
                ctx->cplx_estimation_trash_p =
                ctx->cplx_estimation_trash_b = 0;

        if (s->data_partitioning)

        if (vo_ver_id != 1) {
            if (ctx->new_pred) {
                       "reduced resolution VOP not supported\n");

        if (ctx->scalability) {
            int h_sampling_factor_n;
            int h_sampling_factor_m;
            int v_sampling_factor_n;
            int v_sampling_factor_m;

            h_sampling_factor_n = get_bits(gb, 5);
            h_sampling_factor_m = get_bits(gb, 5);
            v_sampling_factor_n = get_bits(gb, 5);
            v_sampling_factor_m = get_bits(gb, 5);

            if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
                v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
                ctx->scalability = 0;

        av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n",
               s->avctx->framerate.den, s->avctx->framerate.num,
               ctx->time_increment_bits,
               s->progressive_sequence,
               ctx->scalability ? "scalability " : "" , s->quarter_sample ? "qpel " : "",
               s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : ""
    int ver = 0, build = 0, ver2 = 0, ver3 = 0;

    e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
        e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
        ctx->divx_version = ver;
        ctx->divx_build   = build;
        s->divx_packed    = e == 3 && last == 'p';

    e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
        e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
        e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
        if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) {
                   "Unknown Lavc version string encountered, %d.%d.%d; "
                   "clamping sub-version values to 8-bits.\n",
        build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF);
        if (strcmp(buf, "ffmpeg") == 0)
            ctx->lavc_build = 4600;
        ctx->lavc_build = build;

    e = sscanf(buf, "XviD%d", &build);
        ctx->xvid_build = build;

    if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
        if (s->codec_tag == AV_RL32("XVID") ||
            ctx->xvid_build = 0;

    if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1)
        if (s->codec_tag == AV_RL32("DIVX") && ctx->vo_type == 0 &&
            ctx->vol_control_parameters == 0)
            ctx->divx_version = 400;

    if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) {
        ctx->divx_build = -1;

        if (s->codec_tag == AV_RL32("XVIX"))
        if (s->codec_tag == AV_RL32("UMP4"))
        if (ctx->divx_version >= 500 && ctx->divx_build < 1814)
        if (ctx->divx_version > 502 && ctx->divx_build < 1814)
        if (ctx->xvid_build <= 3U)
            s->padding_bug_score = 256 * 256 * 256 * 64;
        if (ctx->xvid_build <= 1U)
        if (ctx->xvid_build <= 12U)
        if (ctx->xvid_build <= 32U)

#define SET_QPEL_FUNC(postfix1, postfix2)                           \
    s->qdsp.put_ ## postfix1        = ff_put_ ## postfix2;          \
    s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;   \
    s->qdsp.avg_ ## postfix1        = ff_avg_ ## postfix2;

        if (ctx->lavc_build < 4653U)
        if (ctx->lavc_build < 4655U)
        if (ctx->lavc_build < 4670U)
        if (ctx->lavc_build <= 4712U)
            if ((ctx->lavc_build&0xFF) >= 100) {
                if (ctx->lavc_build > 3621476 && ctx->lavc_build < 3752552 &&
                   (ctx->lavc_build < 3752037 || ctx->lavc_build > 3752191)

        if (ctx->divx_version >= 0)
        if (ctx->divx_version == 501 && ctx->divx_build == 20020416)
            s->padding_bug_score = 256 * 256 * 256 * 64;

        if (ctx->divx_version < 500U)
        if (ctx->divx_version >= 0)

           "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
           s->workaround_bugs, ctx->lavc_build, ctx->xvid_build,
           ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : "");

    if (CONFIG_MPEG4_DECODER && ctx->xvid_build >= 0 &&
    int time_incr, time_increment;

    if (s->partitioned_frame)

    if (ctx->time_increment_bits == 0 ||
               "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);

        for (ctx->time_increment_bits = 1;
             ctx->time_increment_bits < 16;
             ctx->time_increment_bits++) {
                if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
            } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)

               "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);

    time_increment = get_bits(gb, ctx->time_increment_bits);

        s->last_time_base = s->time_base;
        s->time_base     += time_incr;
        s->time = s->time_base * (int64_t)s->avctx->framerate.num + time_increment;
            if (s->time < s->last_non_b_time) {
                s->time += s->avctx->framerate.num;
        s->pp_time         = s->time - s->last_non_b_time;
        s->last_non_b_time = s->time;
        s->time    = (s->last_time_base + time_incr) * (int64_t)s->avctx->framerate.num + time_increment;
        s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
        if (s->pp_time <= s->pb_time ||
            s->pp_time <= s->pp_time - s->pb_time ||
            if (ctx->t_frame == 0)
                ctx->t_frame = s->pb_time;
            if (ctx->t_frame == 0)
            if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
                s->pb_field_time = 2;
                s->pp_field_time = 4;
                if (!s->progressive_sequence)

    if (s->avctx->framerate.den)

        if (!s->progressive_sequence) {
            s->alternate_scan = 0;

    if (s->alternate_scan) {
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);

        if (ctx->sprite_brightness_change)
                   "sprite_brightness_change not supported\n");
            memset(ctx->sprite_offset, 0, sizeof(ctx->sprite_offset));
            memset(ctx->sprite_delta,  0, sizeof(ctx->sprite_delta));

        s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
        if (s->qscale == 0) {
                   "Error, header damaged or not MPEG-4 header (qscale=0)\n");

            if (s->f_code == 0) {
                       "Error, header damaged or not MPEG-4 header (f_code=0)\n");
            if (s->b_code == 0) {
                       "Error, header damaged or not MPEG4 header (b_code=0)\n");

                   "qp:%d fc:%d,%d %c size:%d pro:%d alt:%d top:%d %cpel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n",
                   s->qscale, s->f_code, s->b_code,
                   s->top_field_first, s->quarter_sample ? 'q' : 'h',
                   s->data_partitioning, ctx->resync_marker,
                   ctx->num_sprite_warping_points, ctx->sprite_warping_accuracy,
                   1 - s->no_rounding, ctx->vo_type,
                   ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
                   ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
                   ctx->cplx_estimation_trash_b,

    if (!ctx->scalability) {
        if (ctx->enhancement_type) {
            int load_backward_shape = get_bits1(gb);
            if (load_backward_shape)
                       "load backward shape isn't supported\n");

    if (ctx->vo_type == 0 && ctx->vol_control_parameters == 0 &&
        ctx->divx_version == -1 && s->picture_number == 0) {
               "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");

    s->picture_number++;

    s->h_edge_pos = s->width;
    s->v_edge_pos = s->height;

    s->partitioned_frame = 0;
    s->interlaced_dct    = 0;

    s->intra_dc_precision = get_bits(gb, 2);

    if (s->alternate_scan) {
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
                             s->idsp.idct_permutation);
    int visual_object_type;

    visual_object_type = get_bits(gb, 4);
                                   int header, int parse_only)
    unsigned startcode, v;

    if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8)
        s->avctx->bits_per_raw_sample = 0;

        (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) {

        startcode = ((startcode << 8) | v) & 0xffffffff;

        if ((startcode & 0xFFFFFF00) != 0x100)

            if (startcode <= 0x11F)
                name = "Video Object Start";
            else if (startcode <= 0x12F)
                name = "Video Object Layer Start";
            else if (startcode <= 0x13F)
            else if (startcode <= 0x15F)
                name = "FGS bp start";
            else if (startcode <= 0x1AF)
            else if (startcode == 0x1B0)
                name = "Visual Object Seq Start";
            else if (startcode == 0x1B1)
                name = "Visual Object Seq End";
            else if (startcode == 0x1B2)
            else if (startcode == 0x1B3)
                name = "Group of VOP start";
            else if (startcode == 0x1B4)
                name = "Video Session Error";
            else if (startcode == 0x1B5)
                name = "Visual Object Start";
            else if (startcode == 0x1B6)
                name = "Video Object Plane start";
            else if (startcode == 0x1B7)
                name = "slice start";
            else if (startcode == 0x1B8)
                name = "extension start";
            else if (startcode == 0x1B9)
            else if (startcode == 0x1BA)
                name = "FBA Object start";
            else if (startcode == 0x1BB)
                name = "FBA Object Plane start";
            else if (startcode == 0x1BC)
                name = "Mesh Object start";
            else if (startcode == 0x1BD)
                name = "Mesh Object Plane start";
            else if (startcode == 0x1BE)
                name = "Still Texture Object start";
            else if (startcode == 0x1BF)
                name = "Texture Spatial Layer start";
            else if (startcode == 0x1C0)
                name = "Texture SNR Layer start";
            else if (startcode == 0x1C1)
                name = "Texture Tile start";
            else if (startcode == 0x1C2)
                name = "Texture Shape Layer start";
            else if (startcode == 0x1C3)
                name = "stuffing start";
            else if (startcode <= 0x1C5)
            else if (startcode <= 0x1FF)
                name = "System start";

        if (startcode >= 0x120 && startcode <= 0x12F) {
                s->studio_profile = 1;
        } else if (s->studio_profile) {
            if (s->studio_profile) {

    s->avctx->has_b_frames = !s->low_delay;

    if (s->studio_profile) {
        if (!s->avctx->bits_per_raw_sample) {

    if (s->divx_packed) {
        int current_pos     = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3);
        int startcode_found = 0;
        if (buf_size - current_pos > 7) {
            for (i = current_pos; i < buf_size - 4; i++)
                    buf[i + 3] == 0xB6) {
                    startcode_found = !(buf[i + 4] & 0x40);
        if (startcode_found) {
            if (!ctx->showed_packed_warning) {
                       "wasteful way to store B-frames ('packed B-frames'). "
                       "Consider using the mpeg4_unpack_bframes bitstream filter without encoding but stream copy to fix it.\n");
                ctx->showed_packed_warning = 1;
                                    &s->allocated_bitstream_buffer_size,
                                    buf_size - current_pos);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
            memcpy(s->bitstream_buffer, buf + current_pos,
                   buf_size - current_pos);
            s->bitstream_buffer_size = buf_size - current_pos;
#if CONFIG_MPEG4_DECODER
    int init = s->m.context_initialized;

    s->time_increment_bits       = s1->time_increment_bits;
    s->shape                     = s1->shape;
    s->vol_sprite_usage          = s1->vol_sprite_usage;
    s->sprite_brightness_change  = s1->sprite_brightness_change;
    s->sprite_warping_accuracy   = s1->sprite_warping_accuracy;
    s->num_sprite_warping_points = s1->num_sprite_warping_points;
    s->m.data_partitioning       = s1->m.data_partitioning;
    s->resync_marker             = s1->resync_marker;
    s->t_frame                   = s1->t_frame;
    s->new_pred                  = s1->new_pred;
    s->enhancement_type          = s1->enhancement_type;
    s->scalability               = s1->scalability;
    s->intra_dc_threshold        = s1->intra_dc_threshold;
    s->divx_version              = s1->divx_version;
    s->divx_build                = s1->divx_build;
    s->xvid_build                = s1->xvid_build;
    s->lavc_build                = s1->lavc_build;
    s->vo_type                   = s1->vo_type;
    s->showed_packed_warning     = s1->showed_packed_warning;
    s->vol_control_parameters    = s1->vol_control_parameters;
    s->cplx_estimation_trash_i   = s1->cplx_estimation_trash_i;
    s->cplx_estimation_trash_p   = s1->cplx_estimation_trash_p;
    s->cplx_estimation_trash_b   = s1->cplx_estimation_trash_b;

    memcpy(s->sprite_shift, s1->sprite_shift, sizeof(s1->sprite_shift));
    memcpy(s->sprite_traj,  s1->sprite_traj,  sizeof(s1->sprite_traj));

    if (!init && s1->xvid_build >= 0)

static int mpeg4_update_thread_context_for_user(AVCodecContext *dst,

static av_cold void mpeg4_init_static(void)
    for (unsigned i = 0; i < 12; i++) {

    ctx->lavc_build = -1;
    ctx->time_increment_bits = 4;

#define OFFSET(x) offsetof(MpegEncContext, x)
#define FLAGS AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY

static const AVClass mpeg4_class = {
    .p.priv_class   = &mpeg4_class,
#if CONFIG_MPEG4_NVDEC_HWACCEL
#if CONFIG_MPEG4_VAAPI_HWACCEL
#if CONFIG_MPEG4_VDPAU_HWACCEL
#if CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
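/*
 * Illustrative sketch (not part of this file, assumes the standard public
 * libavcodec API): the decoder registered above is normally reached through
 * the generic decode path, roughly:
 *
 *     const AVCodec *dec    = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
 *     AVCodecContext *avctx = avcodec_alloc_context3(dec);
 *     avcodec_open2(avctx, dec, NULL);
 *     avcodec_send_packet(avctx, pkt);       // one demuxed MPEG-4 packet
 *     avcodec_receive_frame(avctx, frame);   // decoded AVFrame
 */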
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const uint8_t ff_mpeg4_y_dc_scale_table[32]
static int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int level, int *dir_ptr, int encoding)
Predict the dc.
static int shift(int a, int b)
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
static int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block, int n, int coded, int intra, int use_intra_dc_vlc, int rvlc)
Decode a block.
const int16_t ff_mpeg4_default_intra_matrix[64]
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
static int read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb)
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
int quarter_sample
1->qpel, 0->half pel ME/MC
static const uint8_t header[24]
#define MB_TYPE_INTERLACED
#define OPEN_READER(name, gb)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_mpeg4videodsp_init(Mpeg4VideoDSPContext *c)
The reader does not expect b to be semantically signed here, and if the code is later changed, for example by adding a division, the signedness will almost certainly be mistaken. To avoid this confusion the SUINT type was introduced: it is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
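A small illustration of the SUINT idea (the dequantization-style variable names are hypothetical): the arithmetic happens in an unsigned type, so overflow wraps instead of being undefined, while the value stays semantically signed.
    SUINT a = level;               /* holds a signed int, but has unsigned type */
    int   b = a * qmul + qadd;     /* wraparound here is well defined */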
const int16_t ff_mpeg4_default_non_intra_matrix[64]
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be.
#define MV_TYPE_FIELD
2 vectors, one per field
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
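The mapping this implements, shown as a hedged worked example: read n bits; if the MSB is 1 the value is the code itself, otherwise it is the code minus (2^n - 1).
    /* n = 3: 0b111 -> +7, 0b100 -> +4, 0b011 -> -4, 0b000 -> -7 */
    int dc_diff = get_xbits(gb, 3);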
static void skip_bits1(GetBitContext *s)
#define HWACCEL_NVDEC(codec)
#define AV_LOG_INFO
Standard information.
static void gmc_motion(MpegEncContext *s, const Mpeg4DecContext *ctx, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture)
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
Decode the dc value.
#define SKIP_COUNTER(name, gb, num)
void ff_mpeg4_mcsel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture)
#define i(width, name, range_min, range_max)
Test the status of the outputs and forward it to the corresponding input(s); return FFERROR_NOT_READY where nothing can be done yet. If the filter stores one or a few frames internally for some input, it can consider them part of the FIFO and delay acknowledging a status change accordingly.
const uint8_t ff_alternate_vertical_scan[64]
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb, int header, int parse_only)
Decode MPEG-4 headers.
int8_t * max_run[2]
encoding & decoding
int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
Decode the next video packet.
#define INTRA_MCBPC_VLC_BITS
#define FF_BUG_AUTODETECT
autodetection
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end, which will always be 0.
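A hedged usage sketch, reusing a growing, zero-padded buffer before bitstream parsing (the buffer fields are illustrative, following MpegEncContext naming):
    av_fast_padded_malloc(&s->bitstream_buffer,
                          &s->allocated_bitstream_buffer_size, pkt->size);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);
    memcpy(s->bitstream_buffer, pkt->data, pkt->size);  /* padding bytes stay zeroed */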
#define FF_DEBUG_STARTCODE
static av_cold int decode_init(AVCodecContext *avctx)
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
const char * name
Name of the codec implementation.
av_cold int ff_h263_decode_init(AVCodecContext *avctx)
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
#define AV_PROFILE_MPEG4_SIMPLE_STUDIO
#define FF_BUG_QPEL_CHROMA
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define HWACCEL_VIDEOTOOLBOX(codec)
#define SPRITE_TRAJ_VLC_BITS
const uint8_t ff_mpeg4_studio_dc_luma[19][2]
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
const uint8_t ff_zigzag_direct[64]
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
const uint8_t ff_mpeg4_studio_dc_chroma[19][2]
static const uint8_t * align_get_bits(GetBitContext *s)
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. An option entry lists name, description, offset, default, minimum, maximum and flags: the name is the option key (keep it simple and lowercase), the description says what the option does (for example "set the foo of the bar"), and the offset is the offset of the field in your context (see the OFFSET() macro).
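A hedged sketch of what one such entry looks like (the option itself is hypothetical and not part of mpeg4_options[]):
    #define OFFSET(x) offsetof(MpegEncContext, x)
    #define FLAGS     (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM)
    static const AVOption example_options[] = {
        /* name, description, offset, type, default, min, max, flags */
        { "quarter_sample", "use quarter-pel motion compensation",
          OFFSET(quarter_sample), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
        { NULL },
    };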
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
static VLCElem mb_type_b_vlc[16]
main external API structure.
static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
decode second partition.
#define SHOW_UBITS(name, gb, num)
const uint8_t ff_mb_type_b_tab[4][2]
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
#define VLC_INIT_STATIC_TABLE(vlc_table, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
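A hedged sketch of that flush, using the public send/receive API: pass a NULL packet to signal end of stream, then drain the remaining frames.
    avcodec_send_packet(avctx, NULL);                 /* enter draining mode */
    while (avcodec_receive_frame(avctx, frame) >= 0) {
        /* ... consume the delayed frame ... */
        av_frame_unref(frame);
    }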
static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
#define USER_DATA_STARTCODE
static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
static int mpeg4_is_resync(Mpeg4DecContext *ctx)
check if the next stuff is a resync marker or the end.
static VLCElem studio_chroma_dc[528]
static int mpeg4_decode_dpcm_macroblock(MpegEncContext *s, int16_t macroblock[256], int n)
@ AV_PICTURE_TYPE_P
Predicted.
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Undefined Behavior: in the C language some operations are undefined, like signed integer overflow.
#define FF_BUG_XVID_ILACE
VLCElem ff_h263_inter_MCBPC_vlc[]
#define avpriv_request_sample(...)
static void reset_studio_dc_predictors(MpegEncContext *s)
const uint8_t ff_mpeg4_c_dc_scale_table[32]
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
#define VLC_INIT_RL(rl, static_size)
static void next_start_code_studio(GetBitContext *gb)
#define VLC_INIT_STATE(_table)
static const int16_t alpha[]
static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
Decode the next studio vop header.
static VLCElem dc_chrom[512]
static int mpeg_get_qscale(MpegEncContext *s)
#define HWACCEL_VAAPI(codec)
static VLCElem dc_lum[512]
static void gmc1_motion(MpegEncContext *s, const Mpeg4DecContext *ctx, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture)
static const SheerTable rgb[2]
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
The exact code depends on how similar the blocks are and how related they are to the block
#define UPDATE_THREAD_CONTEXT_FOR_USER(func)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb)
#define QUANT_MATRIX_EXT_ID
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
RL_VLC_ELEM * rl_vlc[32]
decoding only
#define INTER_MCBPC_VLC_BITS
#define SIMPLE_STUDIO_VO_TYPE
const uint8_t ff_mpeg4_studio_intra[12][24][2]