/**
 * @file
 * H.264 / AVC / MPEG-4 part10 motion vector prediction.
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "avcodec.h"
#include "h264dec.h"
#include "mpegutils.h"
#include "libavutil/avassert.h"

static av_always_inline int fetch_diagonal_mv(const H264Context *h,
                                              H264SliceContext *sl,
                                              const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = sl->ref_cache[list][i - 8 + part_width];
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
    const int xy = XY, y4 = Y4;                                         \
    const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];        \
    if (!USES_LIST(mb_type, list))                                      \
        return LIST_NOT_USED;                                           \
    mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
    sl->mv_cache[list][scan8[0] - 2][0] = mv[0];                        \
    sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;                  \
    return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;

    if (topright_ref == PART_NOT_AVAILABLE
        && i >= scan8[0] + 8 && (i & 7) == 4
        && sl->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
        const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
        const int16_t *mv;
        AV_ZERO32(sl->mv_cache[list][scan8[0] - 2]);
        *C = sl->mv_cache[list][scan8[0] - 2];

        if (!MB_FIELD(sl) && IS_INTERLACED(sl->left_type[0])) {
            SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
                        (sl->mb_y & 1) * 2 + (i >> 5));
        }
        /* ... */
    }
#undef SET_DIAG_MV
    /* ... */
}
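/*
 * Illustrative sketch, not part of the original header: SET_DIAG_MV() above
 * splices its MV_OP/REF_OP arguments into expressions as operator suffixes,
 * so a single macro body can either double or halve a value depending on the
 * call site. A hypothetical minimal model of that trick:
 */
#define SCALE_SUFFIX_EXAMPLE(OP) (10 OP)
static av_unused int scale_suffix_demo(void)
{
    return SCALE_SUFFIX_EXAMPLE(* 2) + SCALE_SUFFIX_EXAMPLE(/ 2); /* 20 + 5 */
}
#undef SCALE_SUFFIX_EXAMPLE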
/**
 * Get the predicted MV.
 */
static av_always_inline void pred_motion(const H264Context *const h,
                                         H264SliceContext *sl,
                                         int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = sl->ref_cache[list][index8 - 8];
    const int left_ref     = sl->ref_cache[list][index8 - 1];
    const int16_t *const A = sl->mv_cache[list][index8 - 1];
    const int16_t *const B = sl->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);
    diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { // most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        /* ... */
    }

    ff_tlog(h->avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
}
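/*
 * Illustrative sketch, not part of the original header: the predictor above
 * is the component-wise median of the left (A), top (B) and diagonal (C)
 * neighbor MVs whenever the reference match count is not exactly one;
 * mid_pred() comes from libavutil/common.h.
 */
static av_unused void median_mv_demo(void)
{
    const int16_t A[2] = { 4, -2 }, B[2] = { 8, 0 }, C[2] = { 2, 6 };
    int mx = mid_pred(A[0], B[0], C[0]); /* median(4, 8, 2)  == 4 */
    int my = mid_pred(A[1], B[1], C[1]); /* median(-2, 0, 6) == 0 */
    (void)mx;
    (void)my;
}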
/**
 * Get the directionally predicted 16x8 MV.
 */
static av_always_inline void pred_16x8_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = sl->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 4, list, ref, mx, my);
}
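/*
 * Illustrative sketch, not part of the original header: the *_cache arrays
 * are laid out 8 entries per row and addressed through scan8[], so relative
 * to a cache index the top neighbor is at -8, the left neighbor at -1 and
 * the top-right neighbor at -8 + part_width; that is the arithmetic used by
 * pred_16x8_motion() above and fetch_diagonal_mv() earlier.
 */
static av_unused void cache_neighbor_demo(int idx, int part_width,
                                          int *top, int *left, int *topright)
{
    *top      = idx - 8;              /* one cache row up                    */
    *left     = idx - 1;              /* previous entry in the same row      */
    *topright = idx - 8 + part_width; /* up one row, just past the partition */
}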
/**
 * Get the directionally predicted 8x16 MV.
 */
static av_always_inline void pred_8x16_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    // RARE
    pred_motion(h, sl, n, 2, list, ref, mx, my);
}
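/*
 * Hypothetical caller sketch, not part of the original header: the real
 * callers live in h264_cavlc.c/h264_cabac.c. Block index n selects the
 * partition following the scan8 layout (0/8 for the two 16x8 halves, 0/4 for
 * the two 8x16 halves); list 0 with reference 0 is assumed here purely for
 * illustration.
 */
static av_unused void pred_partition_demo(const H264Context *h, H264SliceContext *sl)
{
    int mx, my;
    pred_16x8_motion(h, sl, 0, 0, 0, &mx, &my); /* upper 16x8 partition */
    pred_16x8_motion(h, sl, 8, 0, 0, &mx, &my); /* lower 16x8 partition */
    pred_8x16_motion(h, sl, 0, 0, 0, &mx, &my); /* left  8x16 partition */
    pred_8x16_motion(h, sl, 4, 0, 0, &mx, &my); /* right 8x16 partition */
}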
#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF(h)) {                       \
        if (MB_FIELD(sl)) {                     \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else {                                \
            if (IS_INTERLACED(type)) {          \
                refn >>= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] *= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        }                                       \
    }

static av_always_inline void pred_pskip_motion(const H264Context *const h,
                                               H264SliceContext *sl)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    /* ... */
    ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, sl->mb_x, sl->mb_y);
    /* ... */
    match_count = !diagonal_ref + !top_ref + !left_ref;
    ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
}
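/*
 * Illustrative sketch, not part of the original header: FIX_MV_MBAFF above
 * adapts a neighbor coded in the other frame/field mode. A field picture has
 * twice as many references (two fields per frame) at half the vertical MV
 * resolution, which is the whole conversion:
 */
static av_unused void mbaff_scale_demo(int *refn, int16_t mv[2], int to_field)
{
    if (to_field) { /* frame-coded neighbor, field-coded current MB */
        *refn <<= 1;
        mv[1]  /= 2;
    } else {        /* field-coded neighbor, frame-coded current MB */
        *refn >>= 1;
        mv[1]  *= 2;
    }
}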
static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };
    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    topleft_xy    = top_xy - 1;
    topright_xy   = top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    sl->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    /* ... */
                }
                /* ... */
            }
        } else {
            if (curr_mb_field_flag) {
                /* ... */
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    /* ... */
                }
                /* ... */
            }
        }
    }
    /* ... */
}
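/*
 * Illustrative sketch, not part of the original header: in the simple
 * progressive case the neighbor indices derived above are plain raster
 * offsets around mb_xy; the MBAFF special cases exist only to re-point them
 * across field/frame macroblock pairs.
 */
static av_unused void mb_neighbors_demo(int mb_xy, int mb_stride,
                                        int *left, int *top,
                                        int *topleft, int *topright)
{
    *top      = mb_xy - mb_stride; /* one macroblock row up */
    *left     = mb_xy - 1;         /* previous macroblock   */
    *topleft  = *top - 1;
    *topright = *top + 1;
}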
static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = sl->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;
    /* ... */
    if (!(top_type & type_mask)) {
        /* ... */
    }
    if (!(left_type[LTOP] & type_mask)) {
        /* ... */
    }
    if (!(left_type[LBOT] & type_mask)) {
        /* ... */
    }
    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
        /* ... */
    }
    if (!(left_type[LTOP] & type_mask)) {
        /* ... */
    }
    if (!(topleft_type & type_mask))
        sl->topleft_samples_available &= 0x7FFF;
    if (!(topright_type & type_mask))
        sl->topright_samples_available &= 0xFBFF;
    /* ... */
    for (i = 0; i < 2; i++) {
        /* ... */
    }
    /* non_zero_count cache */
    nnz_cache = sl->non_zero_count_cache;
    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
        if (!h->chroma_y_shift) {
            AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
            AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
        } else {
            AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
            AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
        }
    } else {
        uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
        AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
        AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
        AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
    }
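    /* 0x40 (64) is the "coefficient count unavailable" marker used by the
     * CAVLC nnz prediction; 0x40404040 packs it into all four bytes, so each
     * 32-bit store above tags a whole row of four 4x4 blocks at once. */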
    for (i = 0; i < 2; i++) {
        if (left_type[LEFT(i)]) {
            nnz = h->non_zero_count[left_xy[LEFT(i)]];
            nnz_cache[3 + 8 *  1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
            nnz_cache[3 + 8 *  2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
            if (CHROMA444(h)) {
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
            } else if (CHROMA422(h)) {
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
            } else {
                nnz_cache[3 + 8 *  6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
            }
        } else {
            nnz_cache[3 + 8 *  1 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  2 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  6 + 2 * 8 * i] =
            nnz_cache[3 + 8 *  7 + 2 * 8 * i] =
            nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
            nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
        }
    }
    if (CABAC(h)) {
        /* ... */
        if (left_type[LTOP]) {
            sl->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                           ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                           (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
        } else {
            sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
        }
    }
    /* ... */
    for (list = 0; list < sl->list_count; list++) {
        int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
        int8_t *ref       = h->cur_pic.ref_index[list];
        int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
        int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
        const int b_stride    = h->b_stride;
        if (!USES_LIST(mb_type, list))
            continue;
        if (USES_LIST(top_type, list)) {
            const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
            AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
            ref_cache[0 - 1 * 8] =
            ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
            ref_cache[2 - 1 * 8] =
            ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
        }
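        /* ref_index is stored per 8x8 block while the cache is 4x4-based,
         * which is why each value above fans out to two adjacent cache
         * slots. */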
        if (mb_type & (MB_TYPE_16x16 | MB_TYPE_16x8)) {
            for (i = 0; i < 2; i++) {
                int cache_idx = -1 + i * 2 * 8;
                if (USES_LIST(left_type[LEFT(i)], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                    const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                    AV_COPY32(mv_cache[cache_idx],
                              mv[b_xy + b_stride * left_block[0 + i * 2]]);
                    AV_COPY32(mv_cache[cache_idx + 8],
                              mv[b_xy + b_stride * left_block[1 + i * 2]]);
                    ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                    ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[cache_idx]);
                    AV_ZERO32(mv_cache[cache_idx + 8]);
                    ref_cache[cache_idx]     =
                    ref_cache[cache_idx + 8] = left_type[LEFT(i)] ? LIST_NOT_USED
                                                                  : PART_NOT_AVAILABLE;
                }
            }
        } else {
            if (USES_LIST(left_type[LTOP], list)) {
                const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                const int b8_xy = 4 * left_xy[LTOP] + 1;
                AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
            } else {
                AV_ZERO32(mv_cache[-1]);
                ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                : PART_NOT_AVAILABLE;
            }
        }
        if (USES_LIST(topright_type, list)) {
            const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
            AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
            ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
        }
        if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
            if (USES_LIST(topleft_type, list)) {
                const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                  (sl->topleft_partition << 2);
                const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition << 1);
                AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                ref_cache[-1 - 1 * 8] = ref[b8_xy];
            }
        }
        ref_cache[2 + 8 * 0] =
        ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
        /* ... */
        if (USES_LIST(top_type, list)) {
            const int b_xy = h->mb2br_xy[top_xy];
            AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
        }
        if (USES_LIST(left_type[LTOP], list)) {
            const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
            AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
            AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
        }
        if (USES_LIST(left_type[LBOT], list)) {
            const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
            AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
            AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
        }
        /* ... */
        if (IS_DIRECT(top_type)) {
            AV_WN32A(&direct_cache[-1 * 8],
                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
        } else if (IS_8X8(top_type)) {
            int b8_xy = 4 * top_xy;
            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
        } else {
            AV_WN32A(&direct_cache[-1 * 8],
                     0x01010101 * (MB_TYPE_16x16 >> 1));
        }
        if (IS_DIRECT(left_type[LTOP]))
            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
        else if (IS_8X8(left_type[LTOP]))
            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
        else
            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;
        if (IS_DIRECT(left_type[LBOT]))
            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
        else if (IS_8X8(left_type[LBOT]))
            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
        else
            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
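        /* The two MAP_F2F definitions below are the bulk converters for
         * MBAFF: per cache entry, frame->field doubles the reference index
         * and halves the vertical MV and MV delta; field->frame is the exact
         * inverse. */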
        if (FRAME_MBAFF(h)) {

#define MAP_MVS                                         \
        MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)     \
        MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)         \
        MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)         \
        MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)         \
        MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)         \
        MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)    \
        MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])  \
        MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])  \
        MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])  \
        MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])

            if (MB_FIELD(sl)) {

#define MAP_F2F(idx, mb_type)                                       \
    if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) { \
        sl->ref_cache[list][idx]     *= 2;                          \
        sl->mv_cache[list][idx][1]   /= 2;                          \
        sl->mvd_cache[list][idx][1] >>= 1;                          \
    }

                MAP_MVS
#undef MAP_F2F
            } else {

#define MAP_F2F(idx, mb_type)                                       \
    if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {  \
        sl->ref_cache[list][idx]    >>= 1;                          \
        sl->mv_cache[list][idx][1]   *= 2;                          \
        sl->mvd_cache[list][idx][1] <<= 1;                          \
    }

                MAP_MVS
#undef MAP_F2F
            }
        }
    }

    sl->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}

/**
 * decodes a P_SKIP or B_SKIP macroblock
 */
static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_xy;
    /* ... */
}

#endif /* AVCODEC_H264_MVPRED_H */