FFmpeg libavcodec, MPEG-video decoder core (mpegvideo_dec.c): annotated excerpt. Elided code is marked with "...".
#include "config_components.h"
...
/* From ff_mpeg_update_thread_context(): copy the decoder state of the
 * source thread context s1 into the destination context s. */
    if (!s->context_initialized) {
        ...
        memcpy(s, s1, sizeof(*s));
        ...
        s->context_initialized = 0;
        s->context_reinit      = 0;
        ...
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized) {
            ...
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        ...
    }

    s->quarter_sample = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    ...
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
                return ret;
#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(&s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(&s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
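/* ---- Illustrative sketch (not part of the original file) ----
 * REBASE_PICTURE maps a Picture pointer that points into the old
 * context's picture[] array to the entry with the same index in the new
 * context's array, and anything else (including NULL) to NULL.  A
 * plain-function equivalent, assuming the same context layout: */
static Picture *rebase_picture_sketch(Picture *pic,
                                      MpegEncContext *new_ctx,
                                      MpegEncContext *old_ctx)
{
    if (pic && pic >= old_ctx->picture &&
        pic <  old_ctx->picture + MAX_PICTURE_COUNT)
        return &new_ctx->picture[pic - old_ctx->picture];
    return NULL;
}
/* ---- end sketch ---- */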
    /* error/bug resilience */
    s->workaround_bugs   = s1->workaround_bugs;
    s->padding_bug_score = s1->padding_bug_score;
    /* MPEG-4 timing info */
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);
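/* Note: the memcpy above copies every MpegEncContext member from
 * last_time_base up to and including pb_field_time in a single call; it
 * relies on those members being declared contiguously in the struct.
 * The general pattern for copying the member range a..b of a struct is:
 *
 *     memcpy(&dst->a, &src->a,
 *            (char *) &src->b + sizeof(src->b) - (char *) &src->a);
 */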
    /* B-frame info */
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    /* DivX handling */
    s->divx_packed  = s1->divx_packed;
    if (s1->bitstream_buffer) {
        av_fast_padded_malloc(&s->bitstream_buffer,
                              &s->allocated_bitstream_buffer_size,
                              s1->bitstream_buffer_size);
        if (!s->bitstream_buffer) {
            s->bitstream_buffer_size = 0;
            return AVERROR(ENOMEM);
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
    }
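/* av_fast_padded_malloc() behaves like av_fast_malloc() but adds
 * AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes at the end, so the copied
 * bitstream is safe for readers that over-read.  On allocation failure
 * the buffer pointer comes back NULL, hence the reset of
 * bitstream_buffer_size above. */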
    /* linesize-dependent scratch buffer allocation */
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    /* MPEG-2/interlacing info */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
/* From ff_mpv_common_frame_size_change(): reinitialize the
 * resolution-dependent parts of an already-initialized context. */
    if (!s->context_initialized)
        return AVERROR(EINVAL);
    ...
    if (s->picture)
        for (int i = 0; i < MAX_PICTURE_COUNT; i++)
            s->picture[i].needs_realloc = 1;

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;
    ...
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        ...
    }
    s->context_reinit = 0;

    return 0;
 fail:
    ff_mpv_free_context_frame(s);
    s->context_reinit = 1;
    return err;
/* alloc_picture(): thin wrapper passing this context's geometry to the
 * generic allocator. */
static int alloc_picture(MpegEncContext *s, Picture *pic)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height,
                            s->b8_stride, &s->linesize, &s->uvlinesize);
}
/* color_frame(): fill a frame with a constant luma value (used to
 * initialize dummy reference frames). */
static void color_frame(AVFrame *frame, int luma)
{
    int h_chroma_shift, v_chroma_shift;
    ...
}
/* From ff_mpv_frame_start(): called after the header is decoded and
 * before the frame itself; rotates the current/last/next picture
 * pointers and allocates the new current picture. */

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->last_picture_ptr);
    }

    /* release non-reference/forgotten frames */
    for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference ||
            (&s->picture[i] != s->last_picture_ptr &&
             &s->picture[i] != s->next_picture_ptr &&
             !s->picture[i].needs_realloc)) {
            ff_mpeg_unref_picture(&s->picture[i]);
        }
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        /* we already have an unused image (maybe it was set before
         * reading the header) */
        pic = s->current_picture_ptr;
    } else {
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        ...
        pic = &s->picture[idx];
    }
    ...
    s->current_picture_ptr = pic;
    ...
    s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
                                        (!s->progressive_frame &&
                                         !s->progressive_sequence);
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    ...
    if ((ret = ff_mpeg_ref_picture(&s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);
    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr &&
            s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* allocate a dummy last picture */
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        ...
        s->last_picture_ptr = &s->picture[idx];

        s->last_picture_ptr->reference = 3;
        ...
        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }
        ...
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* allocate a dummy next picture */
        idx = ff_find_unused_picture(s->avctx, s->picture, 0);
        ...
        s->next_picture_ptr = &s->picture[idx];

        s->next_picture_ptr->reference = 3;
        ...
        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ...
    }
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(&s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(&s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I ||
               (s->last_picture_ptr && s->last_picture_ptr->f->buf[0]));
    if (s->picture_structure != PICT_FRAME) {
        /* single-field decode: double the linesizes so each field is
         * addressed as a half-height frame; the bottom field starts one
         * line into the buffer */
        for (int i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] =
                    FF_PTR_ADD(s->current_picture.f->data[i],
                               s->current_picture.f->linesize[i]);
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }
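/* Worked example for the field trick above: with a frame linesize of
 * 1920, doubling it to 3840 makes consecutive "rows" of the current
 * field skip over the interleaved opposite field, and offsetting
 * data[i] by one original line (FF_PTR_ADD) makes the bottom field
 * start on the second line of the frame buffer.  The MC and IDCT code
 * can then treat the field as an ordinary half-height picture. */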
    /* Select the dequantizer; it can change per frame for MPEG-4, so it
     * cannot be chosen once at init time. */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }
/* From ff_mpv_frame_end(): report that the current frame is fully
 * decoded, so other frame threads may reference it. */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/* ff_print_debug_info(): forward to ff_print_debug_info2() with this
 * context's per-picture tables. */
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride,
                         s->quarter_sample);
/* ff_mpeg_draw_horiz_band() */
    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);
/* From ff_mpeg_flush(): drop all reference pictures and reset the
 * bitstream state. */
    ...
    s->current_picture_ptr =
    s->last_picture_ptr    =
    s->next_picture_ptr    = NULL;
    ...
    s->mb_x = s->mb_y = 0;
    ...
    s->bitstream_buffer_size = 0;
static int hpel_motion_lowres(MpegEncContext *s,
                              uint8_t *dest, const uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, const h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    ...
    const int s_mask = (2 << lowres) - 1;
    int sx, sy;

    /* qpel vectors are truncated to hpel precision; exact qpel does not
     * work in lowres mode anyway */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx     = motion_x & s_mask;
    sy     = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;   /* i.e. >> (lowres + 1) */
    src_y += motion_y >> lowres + 1;
    ...
    if (...) {   /* the block overlaps the picture edge */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
    }
    ...
}
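/* Worked example for the lowres sub-pel split above, assuming half-pel
 * motion vectors: with lowres = 1 (half-size decoding), s_mask =
 * (2 << 1) - 1 = 3, so motion_x = 5 splits into a fractional part
 * sx = 5 & 3 = 1 and an integer part 5 >> (1 + 1) = 1 full lowres
 * pixel.  The fraction is later rescaled with (sx << 2) >> lowres to
 * the 1/8-pel phase expected by the h264_chroma_mc_func that serves as
 * the lowres interpolator. */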
/* mpeg_motion_lowres(): motion compensation for one macroblock (or one
 * field of it) at reduced ("lowres") resolution. */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based, int bottom_field,
                                                int field_select,
                                                uint8_t *const *ref_picture,
                                                const h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t linesize, uvlinesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    int hc = s->chroma_y_shift ? (h + 1 - bottom_field) >> 1 : h;

    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }
    ...
    sx    = motion_x & s_mask;
    sy    = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (...) {    /* H.261: chroma vectors are full-pel */
        ...
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        ...
    } else {
        if (s->chroma_y_shift) {           /* 4:2:0 */
            ...
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {       /* 4:2:2 */
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
            } else {                       /* 4:4:4 */
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        uvsrc_y < 0 || ...) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based,
                                 linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 h_edge_pos, v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        ...
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (...)   /* FF_BUG_IEDGE workaround */
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based,
                                     uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,
                                     uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        ...
    }

    if (bottom_field) {   /* write the bottom field one line down */
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {   /* read from the bottom field of the reference */
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }
    ...
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
}
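/* The pix_op table holds h264_chroma_mc_func pointers for successively
 * narrower blocks; op_index picks the entry matching the lowres-scaled
 * chroma block width, which is why it grows with lowres and with
 * chroma_x_shift (4:2:0 chroma is half as wide as luma).  Reusing the
 * H.264 chroma interpolator works because, at reduced resolution,
 * bilinear filtering with a 1/8-pel phase is an acceptable stand-in for
 * the codec's real half/quarter-pel filter. */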
/* chroma_4mv_motion_lowres(): chroma MC for 4MV macroblocks; the four
 * luma vectors have already been summed into (mx, my). */
static void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op,
                                     int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    const uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* a single chroma vector is derived from the four summed luma
     * vectors with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx    = mx & s_mask;
    sy    = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if (...) {   /* the block overlaps the picture edge */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
/* MPV_motion_lowres(): motion compensation of a single macroblock,
 * dispatching on the motion vector type. */
static void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              int dir, uint8_t *const *ref_picture,
                              const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres,
                               s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1 ||
                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
                ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],
                                   s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after put we make avg of the same block */
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this
                 * is the second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
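/* In the MV_TYPE_8X8 case above, the destination of luma block i is
 * dest_y + ((i & 1) + (i >> 1) * s->linesize) * block_s: (i & 1) selects
 * the left/right half of the macroblock and (i >> 1) the top/bottom
 * half, each half being block_s lowres pixels across.  The four luma
 * vectors are summed into (mx, my) so chroma_4mv_motion_lowres() can
 * derive the single, specially rounded chroma vector from their sum. */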
/* lowest_referenced_row(): find the lowest macroblock row in the
 * reference frame that the current macroblock's vectors can touch. */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int off, mvs;
    ...
    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
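/* Worked example for the row computation above: with half-pel vectors
 * (qpel_shift = 1) and a largest downward component of 33 half-pel
 * units (16.5 pixels), off = ((33 << 1) + 63) >> 6 = 2, so motion
 * compensation for this macroblock row may read up to two macroblock
 * rows below s->mb_y in the reference frame.  That row is exactly the
 * decoding progress a frame thread must wait for before it can run MC
 * against that reference. */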
/* add_dct(): add the IDCT of one block to the destination if it has any
 * nonzero coefficients. */
static void add_dct(MpegEncContext *s, int16_t *block, int i,
                    uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->idsp.idct_add(dest, line_size, block);
    }
}

/* The decoder instantiates mpv_reconstruct_mb_internal() from the
 * template shared with the encoder: */
#define IS_ENCODER 0
#include "mpv_reconstruct_mb_template.c"

void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print the DCT coefficients of the macroblock */
        ...
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if (!s->avctx->lowres) {
        ...
    }
    ...
}