/* map_format(): translate an AVPixelFormat into an LCEVC_ColorFormat (fragment) */
        return LCEVC_I420_10_LE;
    /* ... */
    return LCEVC_ColorFormat_Unknown;
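A sketch of what the full mapping plausibly looks like, based on the pixel formats referenced elsewhere on this page; only LCEVC_I420_10_LE and LCEVC_ColorFormat_Unknown appear in the fragments, the other LCEVC enum names are assumptions.

/* Sketch only: the LCEVC_I420_8 / LCEVC_NV12_8 / LCEVC_NV21_8 / LCEVC_GRAY_8
 * names are assumptions, not taken from the listing above. */
static LCEVC_ColorFormat map_format_sketch(int format)
{
    switch (format) {
    case AV_PIX_FMT_YUV420P:   return LCEVC_I420_8;     /* assumed name */
    case AV_PIX_FMT_YUV420P10: return LCEVC_I420_10_LE;
    case AV_PIX_FMT_NV12:      return LCEVC_NV12_8;     /* assumed name */
    case AV_PIX_FMT_NV21:      return LCEVC_NV21_8;     /* assumed name */
    case AV_PIX_FMT_GRAY8:     return LCEVC_GRAY_8;     /* assumed name */
    }
    return LCEVC_ColorFormat_Unknown;
}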
/* alloc_base_frame(): describe the incoming AVFrame as the LCEVC base picture (fragment) */
    LCEVC_PictureDesc desc;
    /* ... */
    if (res != LCEVC_Success)
        /* ... error path elided ... */
    desc.sampleAspectRatioNum = frame->sample_aspect_ratio.num;
    desc.sampleAspectRatioDen = frame->sample_aspect_ratio.den;
    /* ... */
    if (res != LCEVC_Success) {
        /* ... error path elided ... */
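For orientation, a hypothetical helper that fills the descriptor the way the two assignments above (and the reverse copies in generate_output() further down) suggest. LCEVC_DefaultPictureDesc() and the cropTop field name are assumptions; the sample-aspect-ratio assignments are taken from the listing.

static int fill_desc_from_frame(LCEVC_PictureDesc *desc, const AVFrame *frame)
{
    /* Seeding via LCEVC_DefaultPictureDesc() is an assumption, not shown in the fragments. */
    if (LCEVC_DefaultPictureDesc(desc, map_format(frame->format),
                                 frame->width, frame->height) != LCEVC_Success)
        return AVERROR_EXTERNAL;

    desc->cropTop              = frame->crop_top;    /* assumed field, mirrors generate_output() */
    desc->cropBottom           = frame->crop_bottom;
    desc->cropLeft             = frame->crop_left;
    desc->cropRight            = frame->crop_right;
    desc->sampleAspectRatioNum = frame->sample_aspect_ratio.num;
    desc->sampleAspectRatioDen = frame->sample_aspect_ratio.den;
    return 0;
}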
/* alloc_enhanced_frame(): allocate the picture that will receive the enhanced output (fragment) */
static int alloc_enhanced_frame(void *logctx, FFLCEVCFrame *frame_ctx,
                                LCEVC_PictureHandle *picture)
{
    LCEVC_PictureDesc desc;
    /* ... */
    LCEVC_PicturePlaneDesc planes[4] = { 0 };
    /* ... */
    if (res != LCEVC_Success)
        /* ... error path elided ... */

    for (int i = 0; i < 4; i++) {
        /* ... fill planes[i] from the frame (elided) ... */
    }
    /* ... */
    if (res != LCEVC_Success) {
        /* ... error path elided ... */
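A hypothetical expansion of the plane loop above: it presumably exposes the AVFrame's plane pointers and line sizes to the decoder. The LCEVC_PicturePlaneDesc member names used here (firstSample, rowByteStride) are assumptions, not taken from the listing; frame stands for the AVFrame being described.

LCEVC_PicturePlaneDesc planes[4] = { 0 };
for (int i = 0; i < 4; i++) {
    planes[i].firstSample   = frame->data[i];     /* assumed member name */
    planes[i].rowByteStride = frame->linesize[i]; /* assumed member name */
}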
/* lcevc_send_frame(): feed the base picture and the output picture to the decoder (fragment) */
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;
    /* ... */
    if (res != LCEVC_Success)
        /* ... error path elided ... */

    /* submit the base picture, keyed by the input frame's pts */
    res = LCEVC_SendDecoderBase(lcevc->decoder, in->pts, picture, -1, NULL);
    if (res != LCEVC_Success) {
        LCEVC_FreePicture(lcevc->decoder, picture);
        /* ... */
    }
    /* ... */
    memset(&picture, 0, sizeof(picture));
    /* ... */
    /* submit the picture that will receive the enhanced output */
    res = LCEVC_SendDecoderPicture(lcevc->decoder, picture);
    if (res != LCEVC_Success) {
        LCEVC_FreePicture(lcevc->decoder, picture);
        /* ... */
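Stitched together, the send path of one cycle reads roughly as below; error handling follows the pattern in the fragments, while the frame_ctx->lcevc plumbing and the exact return values are assumptions.

/* Sketch of the send path, composed only from calls visible in the fragments above. */
FFLCEVCContext *lcevc = frame_ctx->lcevc;                /* member name is an assumption */
LCEVC_PictureHandle picture;
int ret;

ret = alloc_base_frame(logctx, lcevc, in, &picture);     /* wrap the input AVFrame */
if (ret < 0)
    return ret;
if (LCEVC_SendDecoderBase(lcevc->decoder, in->pts, picture, -1, NULL) != LCEVC_Success) {
    LCEVC_FreePicture(lcevc->decoder, picture);
    return AVERROR_EXTERNAL;
}

memset(&picture, 0, sizeof(picture));
ret = alloc_enhanced_frame(logctx, frame_ctx, &picture); /* picture the decoder will fill */
if (ret < 0)
    return ret;
if (LCEVC_SendDecoderPicture(lcevc->decoder, picture) != LCEVC_Success) {
    LCEVC_FreePicture(lcevc->decoder, picture);
    return AVERROR_EXTERNAL;
}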
/* generate_output(): collect the enhanced picture and propagate its geometry to the output AVFrame (fragment) */
    LCEVC_PictureDesc desc;
    LCEVC_DecodeInformation info;
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;
    /* ... */
    res = LCEVC_ReceiveDecoderPicture(lcevc->decoder, &picture, &info);
    if (res != LCEVC_Success)
        /* ... error path elided ... */

    res = LCEVC_GetPictureDesc(lcevc->decoder, picture, &desc);
    if (res != LCEVC_Success) {
        LCEVC_FreePicture(lcevc->decoder, picture);
        /* ... */
    }
    /* ... */
    out->crop_bottom = desc.cropBottom;
    out->crop_left   = desc.cropLeft;
    out->crop_right  = desc.cropRight;
    out->sample_aspect_ratio.num = desc.sampleAspectRatioNum;
    out->sample_aspect_ratio.den = desc.sampleAspectRatioDen;
    /* ... */
    /* restore the full (uncropped) frame height */
    out->height = desc.height + out->crop_top + out->crop_bottom;
    /* ... */
    res = LCEVC_FreePicture(lcevc->decoder, picture);
    if (res != LCEVC_Success)
        /* ... error path elided ... */
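As an illustrative example of the height computation above (numbers made up, only the formula comes from the listing): if the decoder reports desc.height = 2160 while crop_top = 0 and crop_bottom = 8, the output frame ends up with out->height = 2160 + 0 + 8 = 2168, and the crop fields copied just before tell downstream code to trim it back to the 2160 displayed lines.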
/* lcevc_receive_frame(): reclaim the base picture once the decoder is done with it (fragment) */
    LCEVC_PictureHandle picture;
    LCEVC_ReturnCode res;
    /* ... */
    res = LCEVC_ReceiveDecoderBase(lcevc->decoder, &picture);
    if (res != LCEVC_Success && res != LCEVC_Again)
        /* ... error path elided ... */
    if (res == LCEVC_Again)
        /* ... nothing to reclaim yet (elided) ... */

    res = LCEVC_FreePicture(lcevc->decoder, picture);
    if (res != LCEVC_Success)
        /* ... error path elided ... */
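Taken together with lcevc_send_frame() above, one decode cycle can be pictured as the following hypothetical wrapper; the function name and call site are assumptions, while the two steps and their signatures come from this page.

static int lcevc_decode_cycle(void *logctx, FFLCEVCFrame *frame_ctx,
                              const AVFrame *in, AVFrame *out)
{
    /* base picture and output picture go in ... */
    int ret = lcevc_send_frame(logctx, frame_ctx, in);
    if (ret < 0)
        return ret;
    /* ... enhanced picture, crop/SAR/height come out, and the base picture is reclaimed */
    return lcevc_receive_frame(logctx, frame_ctx, out);
}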
/* event_callback(): receives decoder events; lcevc_init() below subscribes to LCEVC_Log */
static void event_callback(LCEVC_DecoderHandle dec, LCEVC_Event event,
                           LCEVC_PictureHandle pic, const LCEVC_DecodeInformation *info,
                           const uint8_t *data, uint32_t size, void *logctx)
/* lcevc_free(): RefStruct free callback, tears down the decoder */
    LCEVC_DestroyDecoder(lcevc->decoder);
    memset(lcevc, 0, sizeof(*lcevc));
/* lcevc_init(): create, configure and initialize the LCEVC decoder (fragment) */
    LCEVC_AccelContextHandle dummy = { 0 };
    const int32_t event = LCEVC_Log;
    /* ... */
    if (LCEVC_CreateDecoder(&lcevc->decoder, dummy) != LCEVC_Success) {
        /* ... error path elided ... */
    }
    /* ... */
    LCEVC_ConfigureDecoderInt(lcevc->decoder, "log_level", 4);
    LCEVC_ConfigureDecoderIntArray(lcevc->decoder, "events", 1, &event);
    /* ... */
    if (LCEVC_InitializeDecoder(lcevc->decoder) != LCEVC_Success) {
        /* ... */
        LCEVC_DestroyDecoder(lcevc->decoder);
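Condensed into one self-contained sketch, the decoder lifecycle implied by lcevc_init() and lcevc_free() looks like this; the event-callback registration step is not visible in the fragments and is therefore omitted.

/* Sketch of the decoder lifecycle, using only calls visible on this page. */
static int lcevc_setup_sketch(FFLCEVCContext *lcevc, void *logctx)
{
    LCEVC_AccelContextHandle dummy = { 0 };
    const int32_t event = LCEVC_Log;

    if (LCEVC_CreateDecoder(&lcevc->decoder, dummy) != LCEVC_Success)
        return AVERROR_EXTERNAL;

    /* verbosity plus the set of events the decoder should report */
    LCEVC_ConfigureDecoderInt(lcevc->decoder, "log_level", 4);
    LCEVC_ConfigureDecoderIntArray(lcevc->decoder, "events", 1, &event);

    if (LCEVC_InitializeDecoder(lcevc->decoder) != LCEVC_Success) {
        LCEVC_DestroyDecoder(lcevc->decoder);   /* same teardown as lcevc_free() */
        return AVERROR_EXTERNAL;
    }
    return 0;
}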
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
int ff_lcevc_alloc(FFLCEVCContext **plcevc)
RefStruct is an API for creating reference-counted objects with minimal overhead.
#define AV_VIDEO_MAX_PLANES
Maximum number of planes in any pixel format.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static int lcevc_init(FFLCEVCContext *lcevc, void *logctx)
#define AV_PIX_FMT_YUV420P10
LCEVC_DecoderHandle decoder
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int ff_lcevc_process(void *logctx, AVFrame *frame)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * post_process_opaque
static void lcevc_free(AVRefStructOpaque unused, void *obj)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
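Given the av_refstruct_alloc_ext() signature above and the lcevc_free() / ff_lcevc_unref() entries on this page, ff_lcevc_alloc() plausibly hands out a reference-counted context along these lines; a sketch, not the actual implementation.

static int ff_lcevc_alloc_sketch(FFLCEVCContext **plcevc)
{
    /* lcevc_free() runs once the last reference is dropped via av_refstruct_unref() */
    FFLCEVCContext *lcevc = av_refstruct_alloc_ext(sizeof(*lcevc), 0, NULL, lcevc_free);
    if (!lcevc)
        return AVERROR(ENOMEM);
    *plcevc = lcevc;
    return 0;
}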
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static LCEVC_ColorFormat map_format(int format)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
void ff_lcevc_unref(void *opaque)
static const struct @548 planes[]
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
static int alloc_base_frame(void *logctx, FFLCEVCContext *lcevc, const AVFrame *frame, LCEVC_PictureHandle *picture)
static void event_callback(LCEVC_DecoderHandle dec, LCEVC_Event event, LCEVC_PictureHandle pic, const LCEVC_DecodeInformation *info, const uint8_t *data, uint32_t size, void *logctx)
@ AV_FRAME_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
uintptr_t LCEVC_DecoderHandle
static int generate_output(void *logctx, FFLCEVCFrame *frame_ctx, AVFrame *out)
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
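The AV_FRAME_DATA_LCEVC entry and the two side-data helpers above suggest the usual access pattern for the enhancement payload; a minimal sketch with a hypothetical helper name.

static void consume_lcevc_side_data_sketch(AVFrame *frame)
{
    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_LCEVC);
    if (!sd)
        return;
    /* sd->data / sd->size hold the raw LCEVC payload, NAL emulation bytes intact */
    /* ... hand the payload to the LCEVC decoder (elided) ... */
    av_frame_remove_side_data(frame, AV_FRAME_DATA_LCEVC);
}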
#define AVERROR_EXTERNAL
Generic error in an external library.
static int alloc_enhanced_frame(void *logctx, FFLCEVCFrame *frame_ctx, LCEVC_PictureHandle *picture)
#define AV_LOG_INFO
Standard information.
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
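The two frame helpers above, together with av_frame_copy_props() listed further up, are commonly combined when swapping a freshly produced frame into the caller's output; a generic sketch, not specific to this file.

static int output_frame_sketch(AVFrame *out, AVFrame *produced, const AVFrame *in)
{
    int ret = av_frame_copy_props(produced, in); /* carry over pts, metadata, ... */
    if (ret < 0)
        return ret;
    av_frame_unref(out);                         /* drop whatever out referenced before */
    av_frame_move_ref(out, produced);            /* out takes ownership; produced is reset */
    return 0;
}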
@ AV_PIX_FMT_NV21
Like AV_PIX_FMT_NV12, but with the U and V bytes swapped.
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
static int lcevc_send_frame(void *logctx, FFLCEVCFrame *frame_ctx, const AVFrame *in)
Structure to hold side data for an AVFrame.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
static int lcevc_receive_frame(void *logctx, FFLCEVCFrame *frame_ctx, AVFrame *out)