   28 #define UNCHECKED_BITSTREAM_READER 1 
   64     return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;

   69                               int mb_x, int mb_y, int mb_intra, int mb_skipped)

   76     sl->mb_xy = mb_x + mb_y * h->mb_stride;
 
  109     int vshift = desc->log2_chroma_h;
  110     const int field_pic = h->picture_structure != PICT_FRAME;

  127         offset[2] = (y >> vshift) * src->linesize[1];

  134                                y, h->picture_structure, height);
 
  150     h->slice_table = NULL;

  161     for (i = 0; i < h->nb_slice_ctx; i++) {
 
  183     const int big_mb_num = h->mb_stride * (h->mb_height + 1);
  184     const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);

  189     h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;

  194                       (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)

  196                       big_mb_num * sizeof(uint16_t), fail)
 
  203     h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
  204     h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];

  211     memset(h->slice_table_base, -1,
  212            (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
  213     h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;

  216                       big_mb_num * sizeof(uint32_t), fail);

  218                       big_mb_num * sizeof(uint32_t), fail);
 
  219     for (y = 0; y < h->mb_height; y++)
  220         for (x = 0; x < h->mb_width; x++) {
  221             const int mb_xy = x + y * h->mb_stride;
  222             const int b_xy  = 4 * x + 4 * y * h->b_stride;

  224             h->mb2b_xy[mb_xy]  = b_xy;
  225             h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
 
  241     int mb_array_size = h->mb_height * h->mb_stride;
  242     int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
  243     int c_size  = h->mb_stride * (h->mb_height + 1);
  244     int yc_size = y_size + 2   * c_size;
 
  254     if (sl != h->slice_ctx) {
  255         memset(er, 0, sizeof(*er));
 
  257     if (CONFIG_ERROR_RESILIENCE) {
 
  273                           (h->mb_num + 1) * sizeof(int), fail);

  275         for (y = 0; y < h->mb_height; y++)
  276             for (x = 0; x < h->mb_width; x++)

  279         er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
  280                                                       h->mb_stride + h->mb_width;

  286                          h->mb_height * h->mb_stride * (4*sizeof(int) + 1), fail);

  289                           yc_size * sizeof(int16_t), fail);

  293         for (i = 0; i < yc_size; i++)
 
  308     h->cur_chroma_format_idc = -1;
 
  310     h->width_from_caller     = avctx->width;
  311     h->height_from_caller    = avctx->height;
 
  315     h->poc.prev_poc_msb      = 1 << 16;
 
  316     h->recovery_frame        = -1;
 
  317     h->frame_recovered       = 0;
 
  318     h->poc.prev_frame_num    = -1;
 
  319     h->sei.frame_packing.arrangement_cancel_flag = -1;
 
  320     h->sei.unregistered.x264_build = -1;
 
  322     h->next_outputed_poc = INT_MIN;
 
  324         h->last_pocs[i] = INT_MIN;

  346     if (!h->last_pic_for_ec.f)
 
  349     for (i = 0; i < h->nb_slice_ctx; i++)
  350         h->slice_ctx[i].h264 = h;

  367     memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

  369     h->cur_pic_ptr = NULL;
 
  405         if(h->avctx->time_base.den < INT_MAX/2) {
 
  406             h->avctx->time_base.den *= 2;
 
  408             h->avctx->time_base.num /= 2;
 
  415                                            &h->ps, &h->is_avc, &h->nal_length_size,
 
  420                       "Error decoding the extradata\n");
 
  429     if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
  430         h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
  431         h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
 
  441                "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. " 
  442                "Use it at your own risk\n");
 
  455     h->poc.prev_frame_num        =
 
  456     h->poc.prev_frame_num_offset = 0;
 
  457     h->poc.prev_poc_msb          = 1<<16;
 
  458     h->poc.prev_poc_lsb          = -1;
 
  460         h->last_pocs[i] = INT_MIN;
 
  468     h->next_outputed_poc = INT_MIN;
 
  469     h->prev_interlaced_frame = 1;
 
  472     h->poc.prev_frame_num = -1;
 
  473     if (h->cur_pic_ptr) {
  474         h->cur_pic_ptr->reference = 0;
  475         for (j=i=0; h->delayed_pic[i]; i++)
  476             if (h->delayed_pic[i] != h->cur_pic_ptr)
  477                 h->delayed_pic[j++] = h->delayed_pic[i];
  478         h->delayed_pic[j] = NULL;
 
  483     h->recovery_frame = -1;
 
  484     h->frame_recovered = 0;
 
  485     h->current_slice = 0;
 
  494     memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

  501     h->cur_pic_ptr = NULL;
 
  507     h->context_initialized = 0;
 
  516     for (i = 0; i < h->pkt.nb_nals; i++) {

  542                 first_slice != nal->type)

  545                 first_slice = nal->type;
 
  591         h->current_slice = 0;
 
  592         if (!h->first_field) {
  593             h->cur_pic_ptr = NULL;

  598     if (h->nal_length_size == 4) {
  599         if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {

  601         } else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
 
  609                "Error splitting the input into NAL units.\n");
 
  618     for (i = 0; i < h->pkt.nb_nals; i++) {
 
  620         int max_slice_ctx, err;
 
  628         h->nal_unit_type = nal->type;

  633             if ((nal->data[1] & 0xFC) == 0x98) {
 
  635                 h->next_outputed_poc = INT_MIN;
 
  643             h->has_recovery_point = 1;
 
  653             if (h->current_slice == 1) {

  655                     i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
 
  657                     h->setup_finished = 1;
 
  660                 if (h->avctx->hwaccel &&
  661                     (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)

  665             max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
  666             if (h->nb_slice_ctx_queued == max_slice_ctx) {
  667                 if (h->avctx->hwaccel) {
 
  669                     h->nb_slice_ctx_queued = 0;
 
  683             h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
 
  702                    "SPS decoding failure, trying again with the complete NAL\n");
 
  745     if ((ret < 0 || h->slice_ctx->er.error_occurred) && h->cur_pic_ptr) {
 
  752 #if CONFIG_ERROR_RESILIENCE 
  768         int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
 
  794     if (h->cur_pic_ptr && !h->droppable && h->has_slice) {

  799     return (ret < 0) ? ret : buf_size;
 
  809     if (pos + 10 > buf_size)

  834             const unsigned int block_idx = y * p->mb_width + x;
  835             const unsigned int     mb_xy = y * p->mb_stride + x;
 
  877     int cnt= buf[5]&0x1f;
 
  883         if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
 
  892         if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
 
  907         if (!h->avctx->hwaccel &&
  908             (out->field_poc[0] == INT_MAX ||
  909              out->field_poc[1] == INT_MAX)

  913             int field = out->field_poc[0] == INT_MAX;
 
  920             for (p = 0; p<4; p++) {
  921                 dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
  922                 src_data[p] = f->data[p] +  field   *f->linesize[p];
  923                 linesizes[p] = 2*f->linesize[p];

  927                           f->format, f->width, f->height>>1);
 
  936         if (CONFIG_MPEGVIDEO) {
 
  942                                  out->mb_width, out->mb_height, out->mb_stride, 1);

  950                                    int *got_frame, int buf_index)
 
  955     h->cur_pic_ptr = NULL;

  961          !h->delayed_pic[i]->f->key_frame &&
  962          !h->delayed_pic[i]->mmco_reset;

  964         if (h->delayed_pic[i]->poc < out->poc) {
  965             out     = h->delayed_pic[i];

  969     for (i = out_idx; h->delayed_pic[i]; i++)
  970         h->delayed_pic[i] = h->delayed_pic[i + 1];
 
  986     int buf_size       = avpkt->size;
 
  993     h->setup_finished = 0;
 
  994     h->nb_slice_ctx_queued = 0;
 
 1007                                      &h->ps, &h->is_avc, &h->nal_length_size,

 1010     if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {

 1013                                             &h->ps, &h->is_avc, &h->nal_length_size,

 1028             buf_size >= 4 && !memcmp("Q264", buf, 4))

 1035         (h->mb_y >= h->mb_height && h->mb_height)) {

 1040         if (h->next_output_pic) {
 
 1054 #define OFFSET(x) offsetof(H264Context, x) 
 1055 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM 
 1058     { "nal_length_size", "nal_length_size", OFFSET(nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0 },
 1059     { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
 1060     { "x264_build", "Assume this x264 version if no x264 version found in any SEI", OFFSET(x264_build), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VD },
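
A hypothetical usage sketch (not part of this file): the private options declared above, such as "enable_er" and "x264_build", can be passed to the decoder through an options dictionary when it is opened. The wrapper function and the chosen values are illustrative only.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_h264_decoder(AVCodecContext *avctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "enable_er", "1", 0);    /* force error resilience on damaged frames */
    av_dict_set(&opts, "x264_build", "150", 0); /* assume this x264 version if no SEI found */

    ret = avcodec_open2(avctx, codec, &opts);   /* consumed options are removed from opts   */
    av_dict_free(&opts);
    return ret;
}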
 
 1084 #if CONFIG_H264_DXVA2_HWACCEL 
 1087 #if CONFIG_H264_D3D11VA_HWACCEL 
 1090 #if CONFIG_H264_D3D11VA2_HWACCEL 
 1093 #if CONFIG_H264_NVDEC_HWACCEL 
 1096 #if CONFIG_H264_VAAPI_HWACCEL 
 1099 #if CONFIG_H264_VDPAU_HWACCEL 
 1102 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL 
  
int32_t qp
Base quantisation parameter for the frame.
 
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
 
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
 
#define AV_LOG_WARNING
Something somehow does not look correct.
 
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
 
int8_t ref_cache[2][5 *8]
 
void ff_h264_free_tables(H264Context *h)
 
void ff_h264_sei_uninit(H264SEIContext *h)
Reset SEI values at the beginning of the frame.
 
static av_cold int init(AVCodecContext *avctx)
 
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
 
const uint16_t ff_h264_mb_sizes[4]
 
void ff_h264_ps_uninit(H264ParamSets *ps)
Uninit H264 param sets structure.
 
static void idr(H264Context *h)
instantaneous decoder refresh.
 
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
 
static av_cold int h264_decode_init(AVCodecContext *avctx)
 
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG-2 field pics)
 
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
 
static const int8_t mv[256][2]
 
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context. Allocate buffers which are not shared amongst multiple threads.
 
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
 
int ref_idc
H.264 only, nal_ref_idc.
 
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
 
static av_cold int end(AVCodecContext *avctx)
 
This structure describes decoded (raw) audio or video data.
 
#define HWACCEL_DXVA2(codec)
 
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
 
#define HWACCEL_D3D11VA2(codec)
 
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
 
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
 
int chroma_qp_index_offset[2]
 
void * av_mallocz_array(size_t nmemb, size_t size)
 
#define PICT_BOTTOM_FIELD
 
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
 
static int h264_export_enc_params(AVFrame *f, H264Picture *p)
 
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
 
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
 
int size_bits
Size, in bits, of just the data, excluding the stop bit and any trailing padding.
 
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length)
Decode PPS.
 
#define FF_DECODE_ERROR_DECODE_SLICES
 
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
 
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
 
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
 
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
 
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
 
enum AVDiscard skip_frame
Skip decoding for selected frames.
 
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
 
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb, const H264ParamSets *ps, void *logctx)
 
@ AV_VIDEO_ENC_PARAMS_H264
H.264 stores:
 
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
 
int key_frame
1 -> keyframe, 0-> not
 
int flags
AV_CODEC_FLAG_*.
 
#define HWACCEL_VDPAU(codec)
 
void ff_h264_flush_change(H264Context *h)
 
static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
 
Video encoding parameters for a given frame.
 
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
 
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
 
static int ff_thread_once(char *control, void(*routine)(void))
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
 
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
 
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
 
static int get_last_needed_nal(H264Context *h)
 
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
 
static av_cold int h264_decode_end(AVCodecContext *avctx)
 
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
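
A hedged sketch of what a helper along these lines plausibly does, consistent with the pos + 10 > buf_size check visible at line 809 of the listing: never report zero progress, and once the position is within a few bytes of the end of the packet, report the whole packet as consumed. The name and exact behaviour here are assumptions.

static int get_consumed_bytes_sketch(int pos, int buf_size)
{
    if (pos == 0)
        pos = 1;             /* avoid an infinite no-progress loop               */
    if (pos + 10 > buf_size)
        pos = buf_size;      /* close to the end: consume the trailing bytes too */
    return pos;
}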
 
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps, int *is_avc, int *nal_length_size, int err_recognition, void *logctx)
 
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
 
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
 
#define av_assert0(cond)
assert() equivalent, that is always enabled.
 
void ff_h264_remove_all_refs(H264Context *h)
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
int(* decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size)
Callback for parameter data (SPS/PPS/VPS etc).
 
int sei_recovery_frame_cnt
 
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
 
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
 
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
 
#define LIBAVUTIL_VERSION_INT
 
static const AVClass h264_class
 
Describe the class of an AVClass context structure.
 
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
 
static int is_extra(const uint8_t *buf, int buf_size)
 
uint8_t * edge_emu_buffer
 
int slice_flags
slice flags
 
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
 
struct AVCodecInternal * internal
Private context used for internal data.
 
const char * av_default_item_name(void *ptr)
Return the context name.
 
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
 
const AVProfile ff_h264_profiles[]
 
int init_qp
pic_init_qp_minus26 + 26
 
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
 
@ H264_NAL_AUXILIARY_SLICE
 
#define AV_EF_EXPLODE
abort decoding on minor error detection
 
int top_borders_allocated[2]
 
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
 
static AVOnce h264_vlc_init
 
const char * ff_h264_sei_stereo_mode(const H264SEIFramePacking *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
 
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
 
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
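
A minimal sketch of how per-macroblock quantiser data can be exported as AVVideoEncParams side data, loosely following the block_idx / mb_xy indexing visible at lines 834-835 of the listing. The helper name, its parameters and the flat qscale table are assumptions, not this file's API.

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/video_enc_params.h>

static int export_mb_qp_sketch(AVFrame *f, int mb_width, int mb_height,
                               int mb_stride, const int8_t *qscale_table,
                               int base_qp)
{
    AVVideoEncParams *par =
        av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_H264,
                                             mb_width * mb_height);
    if (!par)
        return AVERROR(ENOMEM);
    par->qp = base_qp;                         /* per-frame base QP      */

    for (int y = 0; y < mb_height; y++)
        for (int x = 0; x < mb_width; x++) {
            const unsigned int block_idx = y * mb_width  + x;
            const unsigned int     mb_xy = y * mb_stride + x;
            AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);

            b->src_x    = 16 * x;              /* 16x16 luma macroblocks */
            b->src_y    = 16 * y;
            b->w        = 16;
            b->h        = 16;
            b->delta_qp = qscale_table[mb_xy] - par->qp;
        }
    return 0;
}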
 
int flags2
AV_CODEC_FLAG2_*.
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
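
A small usage sketch of the reference-counted AVFrame lifecycle these helpers implement; hold_frame() is a hypothetical caller, not part of this decoder.

#include <libavutil/error.h>
#include <libavutil/frame.h>

static int hold_frame(AVFrame **held, const AVFrame *decoded)
{
    int ret;

    if (!*held) {
        *held = av_frame_alloc();           /* fields set to default values       */
        if (!*held)
            return AVERROR(ENOMEM);
    }
    av_frame_unref(*held);                  /* drop any previously held reference */
    ret = av_frame_ref(*held, decoded);     /* new reference, no data copy        */
    if (ret < 0)
        av_frame_free(held);                /* frees the frame and its buffers    */
    return ret;
}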
 
static const AVOption h264_options[]
 
#define MAX_DELAYED_PIC_COUNT
 
#define AV_NUM_DATA_POINTERS
 
uint8_t(*[2] top_borders)[(16 *3) *2]
 
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
 
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
 
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
 
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
 
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
 
#define HWACCEL_D3D11VA(codec)
 
#define HWACCEL_NVDEC(codec)
 
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
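
Roughly, this packs two 16-bit values (typically the two components of a motion vector) into one 32-bit word so a whole vector can be stored or compared in a single operation. A sketch of the little-endian variant follows, as an assumption; the real helper is byte-order aware.

#include <stdint.h>

static inline uint32_t pack16to32_sketch(unsigned a, unsigned b)
{
    /* little-endian layout assumed: 'a' in the low half, 'b' in the high half */
    return (a & 0xFFFF) + (b << 16);
}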
 
#define FF_THREAD_FRAME
Decode more than one frame at once.
 
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
 
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation)
Decode SPS.
 
#define i(width, name, range_min, range_max)
 
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
 
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
 
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
 
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
 
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
 
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
 
Data structure for storing block-level encoding information.
 
av_cold void ff_h264_decode_init_vlc(void)
 
const char * name
Name of the codec implementation.
 
#define PART_NOT_AVAILABLE
 
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
 
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
 
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
 
#define FF_DEBUG_GREEN_MD
 
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
 
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
 
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band.
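
A hedged sketch of how a decoder can fill the per-plane offsets passed to this callback for a band starting at luma row y, matching the vshift and offset[2] computation at lines 109 and 127 of the listing; notify_band() and its parameters are illustrative.

#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>

static void notify_band(AVCodecContext *avctx, const AVFrame *src,
                        int picture_structure, int y, int height)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int offset[AV_NUM_DATA_POINTERS] = { 0 };
    int vshift;

    if (!desc || !avctx->draw_horiz_band)
        return;
    vshift = desc->log2_chroma_h;                 /* vertical chroma subsampling */

    offset[0] = y * src->linesize[0];             /* luma plane                  */
    offset[1] =
    offset[2] = (y >> vshift) * src->linesize[1]; /* both chroma planes          */

    avctx->draw_horiz_band(avctx, src, offset, y, picture_structure, height);
}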
 
main external API structure.
 
int active_thread_type
Which multithreading methods are in use by the codec.
 
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
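
A sketch of the single-field copy the listing performs with av_image_copy() around lines 920-927: offsetting the data pointers by one row selects a field, and doubling the linesizes steps over the other field. copy_missing_field() is a hypothetical wrapper.

#include <stdint.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Duplicate one field of 'f' into the other: field = 0 copies the top field
 * over the bottom one, field = 1 the other way around. */
static void copy_missing_field(AVFrame *f, int field)
{
    uint8_t       *dst_data[4];
    const uint8_t *src_data[4];
    int            linesizes[4];

    for (int p = 0; p < 4; p++) {
        dst_data[p]  = f->data[p] ? f->data[p] + (field ^ 1) * f->linesize[p] : NULL;
        src_data[p]  = f->data[p] ? f->data[p] +  field      * f->linesize[p] : NULL;
        linesizes[p] = 2 * f->linesize[p];   /* skip every other row of the frame */
    }
    av_image_copy(dst_data, linesizes, src_data, linesizes,
                  f->format, f->width, f->height >> 1);
}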
 
uint8_t * bipred_scratchpad
 
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
 
AVDictionary * metadata
metadata.
 
static void fill_rectangle(int x, int y, int w, int h)
 
uint8_t * error_status_table
 
static const uint8_t scan8[16 *3+3]
 
static int ref[MAX_W *MAX_W]
 
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
 
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
 
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
 
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
 
static void h264_decode_flush(AVCodecContext *avctx)
 
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
 
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
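
For reference, unsigned Exp-Golomb coding (the ue(v) descriptor used throughout H.264 headers) counts leading zero bits, then reads that many suffix bits: value = 2^k - 1 + suffix. A self-contained toy reader follows; it is not FFmpeg's GetBitContext implementation and does no bounds checking.

#include <stddef.h>
#include <stdint.h>

typedef struct BitReader {
    const uint8_t *buf;
    size_t bit_pos;
} BitReader;

static unsigned read_bit(BitReader *br)
{
    unsigned bit = (br->buf[br->bit_pos >> 3] >> (7 - (br->bit_pos & 7))) & 1;
    br->bit_pos++;
    return bit;
}

static uint32_t read_ue(BitReader *br)
{
    int k = 0;
    uint32_t suffix = 0;

    while (read_bit(br) == 0)    /* count leading zeros      */
        k++;
    for (int i = 0; i < k; i++)  /* read k suffix bits       */
        suffix = (suffix << 1) | read_bit(br);

    return ((uint32_t)1 << k) - 1 + suffix;  /* 2^k - 1 + suffix */
}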
 
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
 
#define avpriv_request_sample(...)
 
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
 
This structure stores compressed data.
 
void ff_er_frame_end(ERContext *s)
 
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
 
#define HWACCEL_VAAPI(codec)
 
int width
picture width / height.
 
int edge_emu_buffer_allocated
 
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
 
#define H264_MAX_PICTURE_COUNT
 
int bipred_scratchpad_allocated
 
@ AVDISCARD_NONREF
discard all non reference
 
static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame, int *got_frame, int buf_index)
 
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
 
int mb_field_decoding_flag