/* ff_hevc_get_ref_list(): return the reference list stored for the CTB containing (x0, y0) */
int x_cb         = x0 >> s->ps.sps->log2_ctb_size;
int y_cb         = y0 >> s->ps.sps->log2_ctb_size;
int pic_width_cb = s->ps.sps->ctb_width;
int ctb_addr_ts  = s->ps.pps->ctb_addr_rs_to_ts[y_cb * pic_width_cb + x_cb];
return &ref->rpl_tab[ctb_addr_ts]->refPicList[0];
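For intuition, a standalone sketch of the index arithmetic above (the CTB size, picture width and position are hypothetical example values, not from the source):

#include <stdio.h>

int main(void)
{
    int log2_ctb_size = 6;                      /* 64x64 CTBs */
    int ctb_width     = 1920 >> log2_ctb_size;  /* 30 CTB columns for a 1920-wide picture */
    int x0 = 200, y0 = 130;                     /* arbitrary luma position */
    int x_cb = x0 >> log2_ctb_size;             /* CTB column 3 */
    int y_cb = y0 >> log2_ctb_size;             /* CTB row 2 */
    /* raster-scan CTB address; the decoder then maps it through
     * pps->ctb_addr_rs_to_ts[] to tile-scan order */
    printf("ctb_addr_rs = %d\n", y_cb * ctb_width + x_cb);  /* prints 63 */
    return 0;
}

The indirection through ctb_addr_rs_to_ts[] is needed because tiles change the CTB coding order, and rpl_tab is indexed in tile-scan order.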
/* alloc_frame(): find a free DPB slot and allocate its per-frame metadata */
if (frame->frame->buf[0])   /* slot already in use, try the next one */
    continue;
/* ... */
if (!frame->tab_mvf_buf)
    goto fail;
/* ... */
if (!frame->rpl_tab_buf)
    goto fail;
/* ... */
frame->ctb_count = s->ps.sps->ctb_width * s->ps.sps->ctb_height;
for (j = 0; j < frame->ctb_count; j++)
    frame->rpl_tab[j] = (RefPicListTab *)frame->rpl_buf->data;
if (s->avctx->hwaccel) {
    /* ... */
    if (!frame->hwaccel_priv_buf)
        goto fail;
    frame->hwaccel_picture_private = frame->hwaccel_priv_buf->data;
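The tab_mvf and rpl_tab buffers come from AVBufferPools (see the av_buffer_pool_get() entry further down). A minimal, self-contained sketch of that pattern, with an arbitrary 1024-byte element size:

#include <libavutil/buffer.h>

static int pool_demo(void)
{
    AVBufferPool *pool = av_buffer_pool_init(1024, NULL);  /* NULL: default allocator */
    if (!pool)
        return -1;

    AVBufferRef *buf = av_buffer_pool_get(pool);  /* recycles a free buffer if available */
    if (buf) {
        /* buf->data points to 1024 writable bytes */
        av_buffer_unref(&buf);                    /* returns the buffer to the pool */
    }

    av_buffer_pool_uninit(&pool);                 /* pool is freed once all buffers are back */
    return 0;
}

Pooling matters here because the decoder allocates these per-frame tables constantly; reusing buffers avoids a malloc/free pair for every decoded picture.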
/* ff_hevc_set_new_ref(): a POC may not repeat within one coded sequence */
if (frame->frame->buf[0] && frame->sequence == s->seq_decode &&
    frame->poc == poc) {
    av_log(s->avctx, AV_LOG_ERROR, "Duplicate POC in a sequence: %d.\n", poc);
    return AVERROR_INVALIDDATA;
}
/* ... */
if (s->sh.pic_output_flag)
    ref->flags = HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_SHORT_REF;
else
    ref->flags = HEVC_FRAME_FLAG_SHORT_REF;
/* ... */
ref->sequence = s->seq_decode;
/* propagate the SPS conformance window as AVFrame cropping */
ref->frame->crop_left   = s->ps.sps->output_window.left_offset;
ref->frame->crop_right  = s->ps.sps->output_window.right_offset;
ref->frame->crop_top    = s->ps.sps->output_window.top_offset;
ref->frame->crop_bottom = s->ps.sps->output_window.bottom_offset;
/* ff_hevc_output_frame(): select the pending frame with the smallest POC */
int min_poc = INT_MAX;
/* ... */
if (s->sh.no_output_of_prior_pics_flag == 1 && s->no_rasl_output_flag == 1) {
    /* drop prior pictures instead of outputting them */
    if (/* ... */
        frame->sequence == s->seq_output) {
        /* ... */
}
/* ... */
if ((frame->flags & HEVC_FRAME_FLAG_OUTPUT) &&
    frame->sequence == s->seq_output) {
    nb_output++;
    if (frame->poc < min_poc || nb_output == 1) {
        min_poc = frame->poc;
        /* ... */
    }
}
/* wait for more frames before output */
if (!flush && s->seq_output == s->seq_decode && s->ps.sps &&
    nb_output <= s->ps.sps->temporal_layer[s->ps.sps->max_sub_layers - 1].num_reorder_pics)
    return 0;
/* ... */
av_log(s->avctx, AV_LOG_DEBUG, "Output frame with POC %d.\n", frame->poc);
/* ... */
if (s->seq_output != s->seq_decode)
    s->seq_output = (s->seq_output + 1) & 0xff;
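Reduced to its core, the selection above is a minimum scan keyed on POC. A sketch with a simplified frame record (the struct and field names here are illustrative, not the decoder's):

#include <limits.h>

typedef struct DemoFrame { int poc; int pending_output; } DemoFrame;

static int pick_output(const DemoFrame *dpb, int n)
{
    int min_poc = INT_MAX, min_idx = -1, nb_output = 0;
    for (int i = 0; i < n; i++) {
        if (!dpb[i].pending_output)
            continue;
        nb_output++;                       /* count frames still waiting */
        if (dpb[i].poc < min_poc || nb_output == 1) {
            min_poc = dpb[i].poc;
            min_idx = i;
        }
    }
    return min_idx;                        /* -1 if nothing is waiting for output */
}

The num_reorder_pics guard in the real code then decides whether that minimum may be emitted yet, or whether the decoder must wait for more frames.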
/* ff_hevc_bump_frame(): when the DPB is full, flag the oldest frames for bumping */
int min_poc = INT_MAX;
/* ... */
if ((frame->flags) &&
    frame->sequence == s->seq_output &&
    frame->poc != s->poc)
    dpb++;                  /* count pictures still occupying the DPB */
/* ... */
if (s->ps.sps && dpb >= s->ps.sps->temporal_layer[s->ps.sps->max_sub_layers - 1].max_dec_pic_buffering) {
    /* first pass: find the smallest POC among the candidates */
    if ((frame->flags) &&
        frame->sequence == s->seq_output &&
        frame->poc != s->poc) {
        /* ... */
        min_poc = frame->poc;
    }
    /* second pass: flag frames at or below min_poc for bumping */
    if (/* ... */ &&
        frame->sequence == s->seq_output &&
        frame->poc <= min_poc) {
        frame->flags |= HEVC_FRAME_FLAG_BUMPING;
    }
/* init_slice_rpl(): point every CTB from the slice start onwards at this slice's list */
int ctb_count   = frame->ctb_count;
int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_segment_addr];
/* ... */
for (i = ctb_addr_ts; i < ctb_count; i++)
    /* ... */
/* ff_hevc_slice_rpl(): build each list, then apply explicit reordering if signalled */
for (list_idx = 0; list_idx < nb_list; list_idx++) {
    /* ... */
    rpl->ref[i] = rpl_tmp.ref[idx];      /* ref_pic_list_modification: pick entry by index */
    /* ... */
    memcpy(rpl, &rpl_tmp, sizeof(*rpl)); /* no modification: use the list as built */
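The two branches above implement reference picture list modification: either each entry is pulled from the temporary list by an index signalled in the slice header, or the temporary list is used unchanged. A reduced sketch (names are illustrative):

#include <string.h>

typedef struct DemoEntry { int poc; } DemoEntry;

static void apply_list_modification(DemoEntry *out, const DemoEntry *tmp,
                                    const int *list_entry, int nb_refs,
                                    int modification_flag)
{
    if (modification_flag) {
        for (int i = 0; i < nb_refs; i++)
            out[i] = tmp[list_entry[i]];          /* mirrors rpl->ref[i] = rpl_tmp.ref[idx] */
    } else {
        memcpy(out, tmp, nb_refs * sizeof(*out)); /* mirrors memcpy(rpl, &rpl_tmp, ...) */
    }
}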
/* find_ref_idx(): compare POCs, masking off the MSBs when only LSBs were signalled */
int mask = use_msb ? ~0 : (1 << s->ps.sps->log2_max_poc_lsb) - 1;
/* ... */
if (ref->frame->buf[0] && ref->sequence == s->seq_decode) {
    /* ... */
av_log(s->avctx, AV_LOG_ERROR, "Could not find ref with POC %d\n", poc);
/* generate_missing_ref(): synthesise a mid-grey frame for a missing reference */
if (!s->avctx->hwaccel) {
    if (!s->ps.sps->pixel_shift) {
        /* 8-bit samples: one memset per plane */
        for (i = 0; frame->frame->data[i]; i++)
            memset(frame->frame->data[i], 1 << (s->ps.sps->bit_depth - 1),
                   /* ... */);
    } else {
        /* wider samples: write one 16-bit value per pixel */
        for (i = 0; frame->frame->data[i]; i++)
            for (y = 0; y < (s->ps.sps->height >> s->ps.sps->vshift[i]); y++) {
                uint8_t *dst = frame->frame->data[i] + y * frame->frame->linesize[i];
                AV_WN16(dst, 1 << (s->ps.sps->bit_depth - 1));
                /* ... */
            }
    }
}
/* ... */
frame->sequence = s->seq_decode;
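1 << (bit_depth - 1) is mid-grey: 128 for 8-bit, 512 for 10-bit. With no pixel shift one memset per plane suffices; wider samples occupy two bytes each, so the code stores 16-bit values (and, in the real function, replicates the first sample across the row with av_memcpy_backptr()). A standalone sketch of the same idea, assuming aligned rows:

#include <stdint.h>
#include <string.h>

static void fill_grey(uint8_t *plane, int width, int height, int linesize,
                      int bit_depth)
{
    if (bit_depth == 8) {
        for (int y = 0; y < height; y++)
            memset(plane + y * linesize, 1 << (bit_depth - 1), width);
    } else {
        for (int y = 0; y < height; y++) {
            uint16_t *dst = (uint16_t *)(plane + y * linesize);
            for (int x = 0; x < width; x++)
                dst[x] = 1 << (bit_depth - 1);   /* e.g. 512 for 10-bit */
        }
    }
}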
static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc,
                             int ref_flag, uint8_t use_msb)
/* ff_hevc_frame_rps(): sort short-term entries into the current/follow sets */
if (!short_rps->used[i])
    /* ... */
else if (i < short_rps->num_negative_pics)
    /* ... */
/* ... */
int poc = long_rps->poc[i];
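The branches above sort short-term RPS entries into three sets: entries not used by the current picture, negative-delta entries (the first num_negative_pics), and positive-delta entries. A reduced sketch (the enum names are illustrative, not necessarily the decoder's):

#include <stdint.h>

enum { ST_CURR_BEF, ST_CURR_AFT, ST_FOLL };  /* before / after / follow */

static int classify_short_term(int i, const uint8_t *used, unsigned num_negative_pics)
{
    if (!used[i])
        return ST_FOLL;       /* kept for later pictures, not referenced now */
    else if ((unsigned)i < num_negative_pics)
        return ST_CURR_BEF;   /* reference preceding the current picture */
    else
        return ST_CURR_AFT;   /* reference following the current picture */
}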
const RefPicList * ff_hevc_get_ref_list(const HEVCContext *s, const HEVCFrame *ref, int x0, int y0)
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
static HEVCFrame * find_ref_idx(HEVCContext *s, int poc, uint8_t use_msb)
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
This structure describes decoded (raw) audio or video data.
static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
#define HEVC_FRAME_FLAG_LONG_REF
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
static void mark_ref(HEVCFrame *frame, int flag)
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in out.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
#define HEVC_FRAME_FLAG_BUMPING
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
#define AV_CEIL_RSHIFT(a, b)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static int init_slice_rpl(HEVCContext *s)
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define HEVC_FRAME_FLAG_SHORT_REF
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
struct HEVCFrame * ref[HEVC_MAX_REFS]
static HEVCFrame * generate_missing_ref(HEVCContext *s, int poc)
@ AV_PICTURE_STRUCTURE_BOTTOM_FIELD
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AV_PICTURE_STRUCTURE_TOP_FIELD
uint8_t poc_msb_present[32]
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame (a usage sketch follows at the end of this list).
#define HEVC_FRAME_FLAG_OUTPUT
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
static HEVCFrame * alloc_frame(HEVCContext *s)
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
#define FF_THREAD_FRAME
Decode more than one frame at once.
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
unsigned int num_negative_pics
void ff_hevc_bump_frame(HEVCContext *s)
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int isLongTerm[HEVC_MAX_REFS]
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
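For the two AVFrame helpers listed above, a minimal usage sketch: av_frame_ref() makes dst share src's data buffers, while av_frame_copy_props() would copy only metadata (timestamps, side data, crop fields) onto an already-allocated frame.

#include <libavutil/frame.h>
#include <libavutil/error.h>

static int frame_ref_demo(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return AVERROR(ENOMEM);

    int ret = av_frame_ref(dst, src);  /* dst now references src's buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }

    /* ... use dst ... */

    av_frame_free(&dst);               /* unrefs the data and frees the frame */
    return 0;
}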