106 "Left and right sizes differ (%dx%d vs %dx%d).\n",
112 "Left and right time bases differ (%d/%d vs %d/%d).\n",
113 time_base.
num, time_base.
den,
119 "Left and right framerates differ (%d/%d vs %d/%d).\n",
120 frame_rate.
num, frame_rate.
den,
129 s->depth =
s->pix_desc->comp[0].depth;
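These lines come from config_output(), which refuses to configure the filter when the two inputs disagree on geometry or timing. A minimal sketch of that kind of validation, assuming the file's own headers and a hypothetical helper check_inputs_match() that receives the filter context and the two input links:

/* Sketch only, not the file's actual code: reject mismatched inputs
 * before configuring the output. */
static int check_inputs_match(AVFilterContext *ctx,
                              const AVFilterLink *left,
                              const AVFilterLink *right)
{
    if (left->w != right->w || left->h != right->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               left->w, left->h, right->w, right->h);
        return AVERROR_INVALIDDATA;
    }
    if (av_cmp_q(left->time_base, right->time_base)) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               left->time_base.num,  left->time_base.den,
               right->time_base.num, right->time_base.den);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}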
166     if (interleaved && s->depth <= 8) {
167         const uint8_t *leftp  = s->input_views[LEFT]->data[0];
168         const uint8_t *rightp = s->input_views[RIGHT]->data[0];
169         uint8_t *dstp         = out->data[0];
170         int length = out->width / 2;
171         int lines  = out->height;
173         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
174             if (plane == 1 || plane == 2) {
178             for (i = 0; i < lines; i++) {
180                 leftp  = s->input_views[LEFT]->data[plane] +
181                          s->input_views[LEFT]->linesize[plane] * i;
182                 rightp = s->input_views[RIGHT]->data[plane] +
183                          s->input_views[RIGHT]->linesize[plane] * i;
184                 dstp   = out->data[plane] + out->linesize[plane] * i;
185                 for (j = 0; j < length; j++) {
187                     if ((s->pix_desc->log2_chroma_w ||
188                          s->pix_desc->log2_chroma_h) &&
189                         (plane == 1 || plane == 2)) {
190                         *dstp++ = (*leftp + *rightp) / 2;
191                         *dstp++ = (*leftp + *rightp) / 2;
201     } else if (interleaved && s->depth > 8) {
202         const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
203         const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
204         uint16_t *dstp         = (uint16_t *)out->data[0];
205         int length = out->width / 2;
206         int lines  = out->height;
208         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
209             if (plane == 1 || plane == 2) {
213             for (i = 0; i < lines; i++) {
215                 leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
216                          s->input_views[LEFT]->linesize[plane] * i / 2;
217                 rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
218                          s->input_views[RIGHT]->linesize[plane] * i / 2;
219                 dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
220                 for (j = 0; j < length; j++) {
222                     if ((s->pix_desc->log2_chroma_w ||
223                          s->pix_desc->log2_chroma_h) &&
224                         (plane == 1 || plane == 2)) {
225                         *dstp++ = (*leftp + *rightp) / 2;
226                         *dstp++ = (*leftp + *rightp) / 2;
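Both branches above pack the views column by column: each iteration reads one pixel from the left view and one from the right view and writes two adjacent output columns, except on subsampled chroma planes, where the two half-width views share a single chroma sample and are averaged. A standalone sketch of that inner loop for the 8-bit case (interleave_row8 is a hypothetical helper, not part of the file):

/* Sketch: column-interleave one 8-bit row of each view into one output row.
 * 'length' is half the output width, as in the listing above. */
static void interleave_row8(uint8_t *dst, const uint8_t *left,
                            const uint8_t *right, int length, int average)
{
    for (int j = 0; j < length; j++) {
        if (average) {               /* subsampled chroma: blend both views */
            dst[2 * j]     = (left[j] + right[j]) / 2;
            dst[2 * j + 1] = (left[j] + right[j]) / 2;
        } else {                     /* luma / full-resolution planes: alternate columns */
            dst[2 * j]     = left[j];
            dst[2 * j + 1] = right[j];
        }
    }
}

The 16-bit branch follows the same pattern with uint16_t pointers, which is why its byte linesizes are divided by 2 to become element strides.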
237     for (i = 0; i < 2; i++) {
238         const AVFrame *const input_view = s->input_views[i];
239         const int psize = 1 + (s->depth > 8);
241         int sub_w = psize * input_view->width >> s->pix_desc->log2_chroma_w;
244         dst[1] = out->data[1] + i * sub_w;
245         dst[2] = out->data[2] + i * sub_w;
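For plain side-by-side output the second view is copied at a horizontal byte offset inside each plane: a full-width step for luma, and a step shrunk by log2_chroma_w (and scaled by the component size) for the chroma planes. A sketch of that offset computation, assuming this file's usual headers and a hypothetical helper:

/* Sketch: byte offset of view i (0 = left, 1 = right) inside each output
 * plane for side-by-side packing of a 3-plane YUV format. */
static void sbs_offsets(ptrdiff_t off[3], int i, int width, int depth,
                        const AVPixFmtDescriptor *desc)
{
    const int psize = 1 + (depth > 8);                       /* bytes per sample */
    const int sub_w = (psize * width) >> desc->log2_chroma_w;

    off[0] = (ptrdiff_t)i * psize * width;                   /* luma plane */
    off[1] = (ptrdiff_t)i * sub_w;                           /* chroma planes use */
    off[2] = (ptrdiff_t)i * sub_w;                           /* the subsampled width */
}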
264     for (i = 0; i < 2; i++) {
265         const AVFrame *const input_view = s->input_views[i];
268         int sub_h = input_view->height >> s->pix_desc->log2_chroma_h;
270         dst[0] = out->data[0] + i * out->linesize[0] *
271                  (interleaved + input_view->height * (1 - interleaved));
272         dst[1] = out->data[1] + i * out->linesize[1] *
273                  (interleaved + sub_h * (1 - interleaved));
274         dst[2] = out->data[2] + i * out->linesize[2] *
275                  (interleaved + sub_h * (1 - interleaved));
277         linesizes[0] = out->linesize[0] +
278                        interleaved * out->linesize[0];
279         linesizes[1] = out->linesize[1] +
280                        interleaved * out->linesize[1];
281         linesizes[2] = out->linesize[2] +
282                        interleaved * out->linesize[2];
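The expression interleaved + h * (1 - interleaved) evaluates to 1 when interleaved is set and to h otherwise, so the same copy covers both AV_STEREO3D_LINES (second view starts one row down, stride doubled) and AV_STEREO3D_TOPBOTTOM (second view starts a full plane height down, normal stride). A sketch of that selection, with a hypothetical helper and this file's usual headers assumed:

/* Sketch: starting byte offset and row stride of view i in one output plane.
 *   interleaved != 0 (lines):      start at row i,     step two rows
 *   interleaved == 0 (top-bottom): start at row i * h, step one row   */
static void vpack_layout(int interleaved, int i, int h, int linesize,
                         ptrdiff_t *start, int *stride)
{
    *start  = (ptrdiff_t)i * linesize *
              (interleaved + h * (1 - interleaved));
    *stride = linesize + interleaved * linesize;   /* linesize or 2 * linesize */
}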
321     if (!(s->input_views[0] && s->input_views[1]))
326     for (i = 0; i < 2; i++) {
337     stereo->type = s->format;
362     for (i = 0; i < 2; i++)
371     stereo->type = s->format;
385     if (!s->input_views[0]) {
391     if (!s->input_views[1]) {
397     if (s->input_views[0] && s->input_views[1])
404         !s->input_views[0]) {
410         !s->input_views[1]) {
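The remaining fragments belong to try_push_frame() and activate(): the filter buffers one frame per input in s->input_views[], packs and tags an output frame once both views are present, and otherwise asks the inputs for more data. A simplified sketch of such a two-input activate() pattern, assuming FFmpeg's internal filters.h helpers (FFERROR_NOT_READY, ff_outlink_frame_wanted()) and omitting the EOF and error handling of the real function:

/* Sketch only: a minimal two-input activate() loop. */
static int activate_sketch(AVFilterContext *ctx)
{
    FramepackContext *s   = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* buffer at most one frame per input until both views are available */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_inlink_consume_frame(ctx->inputs[i], &s->input_views[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->input_views[0] && s->input_views[1])
        return try_push_frame(ctx);   /* pack both views into one output frame */

    /* otherwise request more data on whichever input is still missing */
    if (ff_outlink_frame_wanted(outlink)) {
        for (i = 0; i < 2; i++)
            if (!s->input_views[i])
                ff_inlink_request_frame(ctx->inputs[i]);
    }

    return FFERROR_NOT_READY;
}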
418 #define OFFSET(x) offsetof(FramepackContext, x)
419 #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
461 .priv_class = &framepack_class,
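OFFSET() and VF are the usual helpers for the AVOption table exposed through framepack_class. A sketch of how such a table for the format option could look, built from the stereo types listed below; the option names, defaults and ranges here are illustrative and not necessarily those of the actual framepack_options:

/* Sketch only: an illustrative option table using OFFSET()/VF. */
static const AVOption framepack_options_sketch[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, VF, .unit = "format" },
    { "sbs",      "Views are packed next to each other",   0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE },    INT_MIN, INT_MAX, VF, .unit = "format" },
    { "tab",      "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM },     INT_MIN, INT_MAX, VF, .unit = "format" },
    { "frameseq", "Views are alternated temporally",       0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, VF, .unit = "format" },
    { "lines",    "Views are packed per line",             0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES },         INT_MIN, INT_MAX, VF, .unit = "format" },
    { "columns",  "Views are packed per column",           0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS },       INT_MIN, INT_MAX, VF, .unit = "format" },
    { NULL },
};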
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
#define AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_GBRAP16
AVPixelFormat
Pixel format.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Note (from the filter design documentation): "frame" means either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output a filter exposes the list of supported formats; for video that means pixel formats, for audio it means channel layout, sample format and sample rate. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at both ends of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link, all references to the list are updated. A filter that requires identical input and output formats therefore only has to use a reference to the same list on both pads. query_formats() can also leave some formats unset and return AVERROR(EAGAIN) so that the negotiation mechanism tries again later, which lets filters with complex requirements use the format negotiated on one link to set the formats supported on another.
#define FILTER_PIXFMTS_ARRAY(array)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static av_always_inline void spatial_frame_pack(AVFilterLink *outlink, AVFrame *dst)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
static const AVFilterPad framepack_outputs[]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define AV_PIX_FMT_YUVA422P9
#define FILTER_INPUTS(array)
This structure describes decoded (raw) audio or video data.
#define AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV420P10
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
static int try_push_frame(AVFilterContext *ctx)
const char * name
Filter name.
A link between two filters.
#define AV_PIX_FMT_YUVA422P10
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Link properties exposed to filter code, but not external callers.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
#define AV_PIX_FMT_YUVA420P9
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
#define AV_PIX_FMT_GBRP14
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
void * priv
private data for use by the filter
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_GRAY16
A filter pad used for either input or output.
#define AV_PIX_FMT_YUV444P10
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void vertical_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
#define AV_PIX_FMT_YUV422P16
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AV_PIX_FMT_GBRAP10
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
#define AV_PIX_FMT_GBRAP12
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
Note (from the filter design documentation): for each input and each output a filter exposes the list of supported formats; for video that means pixel formats, for audio it means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. These lists are not just lists, they are references to shared objects.
#define AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P16
AVFILTER_DEFINE_CLASS(framepack)
#define AV_PIX_FMT_GRAY14
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
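For example, a timestamp counted in a 1/25 time base can be converted to 90 kHz ticks like this (values chosen purely for illustration):

int64_t pts_90k = av_rescale_q(pts, (AVRational){ 1, 25 },
                                    (AVRational){ 1, 90000 });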
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FILTER_OUTPUTS(array)
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
#define AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GBRP16
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
AVFrame * input_views[2]
input frames
Rational number (pair of numerator and denominator).
static void horizontal_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
static FilterLink * ff_filter_link(AVFilterLink *link)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const AVFilter ff_vf_framepack
int format
agreed upon media format
#define AV_PIX_FMT_YUV422P12
#define AV_NOPTS_VALUE
Undefined timestamp value.
enum AVStereo3DType format
frame pack type output
#define AV_PIX_FMT_YUV444P12
int format
format of the frame, -1 if unknown or unset; values correspond to enum AVPixelFormat for video frames,...
static int activate(AVFilterContext *ctx)
AVFilterContext * src
source filter
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AV_PIX_FMT_YUVA444P10
#define i(width, name, range_min, range_max)
static int config_output(AVFilterLink *outlink)
int w
agreed upon image width
#define AV_PIX_FMT_GBRP12
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
const char * name
Pad name.
static av_cold void framepack_uninit(AVFilterContext *ctx)
static const AVOption framepack_options[]
#define AV_PIX_FMT_YUV444P9
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_STEREO3D_COLUMNS
Views are packed per column.
enum AVStereo3DType type
How views are packed within the video.
#define AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P14
const AVPixFmtDescriptor * pix_desc
agreed pixel format
int h
agreed upon image height
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const ...
#define AV_PIX_FMT_YUVA422P12
@ AV_OPT_TYPE_INT
Underlying C type is int.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
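A minimal sketch of how a filter such as this one can tag its output with the chosen packing (out and s are assumed to be the output AVFrame and the FramepackContext):

/* Sketch: attach stereo 3D side data to the packed output frame. */
AVStereo3D *stereo = av_stereo3d_create_side_data(out);
if (!stereo)
    return AVERROR(ENOMEM);
stereo->type = s->format;   /* e.g. AV_STEREO3D_SIDEBYSIDE */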
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
enum AVStereo3DView view
Determines which views are packed.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
static const AVFilterPad framepack_inputs[]
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
AVStereo3DType
List of possible 3D Types.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_YUV440P12
Note (from the filter design documentation on activate()): what the callback must do depends on the semantics of the filter. It must examine the status of the filter's links and proceed accordingly; the status of output links is stored in the status_in and status_out fields and is tested by the ff_outlink_frame_wanted() function, which returns true when a frame is wanted on the output link.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable.
#define AV_PIX_FMT_YUV444P14
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
#define AV_PIX_FMT_GRAY12
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14
static enum AVPixelFormat formats_supported[]