104 "Left and right sizes differ (%dx%d vs %dx%d).\n",
110 "Left and right time bases differ (%d/%d vs %d/%d).\n",
111 time_base.
num, time_base.
den,
117 "Left and right framerates differ (%d/%d vs %d/%d).\n",
118 frame_rate.
num, frame_rate.
den,
120 ctx->inputs[
RIGHT]->frame_rate.den);
127 s->depth =
s->pix_desc->comp[0].depth;
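The strings at source lines 104, 110 and 117 above are the error messages of the left/right consistency checks in config_output(). Below is a minimal sketch of that check pattern, built only on av_cmp_q(), av_log() and AVERROR_INVALIDDATA (all listed in the symbol index further down); the helper name check_view_params and its parameter list are illustrative, not taken from the file.

#include <libavutil/error.h>
#include <libavutil/log.h>
#include <libavutil/rational.h>

/* Illustrative sketch, not the verbatim file contents. */
static int check_view_params(void *log_ctx,
                             AVRational left_tb,  AVRational right_tb,
                             AVRational left_fps, AVRational right_fps)
{
    if (av_cmp_q(left_tb, right_tb)) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               left_tb.num, left_tb.den, right_tb.num, right_tb.den);
        return AVERROR_INVALIDDATA;
    }
    if (av_cmp_q(left_fps, right_fps)) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               left_fps.num, left_fps.den, right_fps.num, right_fps.den);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}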
164     if (interleaved && s->depth <= 8) {
165         const uint8_t *leftp  = s->input_views[LEFT]->data[0];
166         const uint8_t *rightp = s->input_views[RIGHT]->data[0];
167         uint8_t *dstp         = out->data[0];
168         int length = out->width / 2;
169         int lines  = out->height;
171         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
172             if (plane == 1 || plane == 2) {
176             for (i = 0; i < lines; i++) {
178                 leftp  = s->input_views[LEFT]->data[plane] +
179                          s->input_views[LEFT]->linesize[plane] * i;
180                 rightp = s->input_views[RIGHT]->data[plane] +
181                          s->input_views[RIGHT]->linesize[plane] * i;
182                 dstp   = out->data[plane] + out->linesize[plane] * i;
183                 for (j = 0; j < length; j++) {
185                     if ((s->pix_desc->log2_chroma_w ||
186                          s->pix_desc->log2_chroma_h) &&
187                         (plane == 1 || plane == 2)) {
188                         *dstp++ = (*leftp + *rightp) / 2;
189                         *dstp++ = (*leftp + *rightp) / 2;
199     } else if (interleaved && s->depth > 8) {
200         const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
201         const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
202         uint16_t *dstp         = (uint16_t *)out->data[0];
203         int length = out->width / 2;
204         int lines  = out->height;
206         for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
207             if (plane == 1 || plane == 2) {
211             for (i = 0; i < lines; i++) {
213                 leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
214                          s->input_views[LEFT]->linesize[plane] * i / 2;
215                 rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
216                          s->input_views[RIGHT]->linesize[plane] * i / 2;
217                 dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
218                 for (j = 0; j < length; j++) {
220                     if ((s->pix_desc->log2_chroma_w ||
221                          s->pix_desc->log2_chroma_h) &&
222                         (plane == 1 || plane == 2)) {
223                         *dstp++ = (*leftp + *rightp) / 2;
224                         *dstp++ = (*leftp + *rightp) / 2;
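When packing by columns (the interleaved case above) on a format with subsampled chroma, one chroma sample covers a pair of luma columns, so the same source chroma column has to serve both output columns of a left/right pair; the loop therefore writes the average of the two views' chroma twice (source lines 188/189 and 223/224). For example, with a left chroma sample of 100 and a right chroma sample of 120, both adjacent output chroma samples become (100 + 120) / 2 = 110.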
235         for (i = 0; i < 2; i++) {
236             const AVFrame *const input_view = s->input_views[i];
237             const int psize = 1 + (s->depth > 8);
239             int sub_w = psize * input_view->width >> s->pix_desc->log2_chroma_w;
241             dst[0] = out->data[0] + i * input_view->width * psize;
242             dst[1] = out->data[1] + i * sub_w;
243             dst[2] = out->data[2] + i * sub_w;
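Source lines 241-243 above place the second view (i == 1) immediately to the right of the first inside every output row: the luma offset is one view width in bytes, the chroma offset the corresponding subsampled width. Below is a self-contained check of that arithmetic, assuming an 8-bit yuv420p pair of 1280-pixel-wide views; all numbers are illustrative.

#include <stdio.h>

int main(void)
{
    const int width         = 1280; /* width of one input view (assumed) */
    const int depth         = 8;    /* bits per component (assumed) */
    const int log2_chroma_w = 1;    /* yuv420p: chroma has half the width */

    const int psize = 1 + (depth > 8);                  /* bytes per sample */
    const int sub_w = (psize * width) >> log2_chroma_w; /* chroma row bytes */

    /* Offsets of the right view (i == 1) inside each output row. */
    printf("luma offset:   %d bytes\n", 1 * width * psize); /* 1280 */
    printf("chroma offset: %d bytes\n", 1 * sub_w);         /* 640  */
    return 0;
}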
262     for (i = 0; i < 2; i++) {
263         const AVFrame *const input_view = s->input_views[i];
266         int sub_h = input_view->height >> s->pix_desc->log2_chroma_h;
268         dst[0] = out->data[0] + i * out->linesize[0] *
269                  (interleaved + input_view->height * (1 - interleaved));
270         dst[1] = out->data[1] + i * out->linesize[1] *
271                  (interleaved + sub_h * (1 - interleaved));
272         dst[2] = out->data[2] + i * out->linesize[2] *
273                  (interleaved + sub_h * (1 - interleaved));
275         linesizes[0] = out->linesize[0] +
276                        interleaved * out->linesize[0];
277         linesizes[1] = out->linesize[1] +
278                        interleaved * out->linesize[1];
279         linesizes[2] = out->linesize[2] +
280                        interleaved * out->linesize[2];
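Source lines 268-280 above handle both vertical packings with one expression: with interleaved == 0 (top-bottom) the second view starts a full view height down and the copy keeps the normal linesize, while with interleaved == 1 (row interleaving) it starts a single row down and the destination stride is doubled so the two views land on alternating output rows. Below is a small standalone evaluation of those expressions with assumed numbers (720-line views, 1280-byte luma linesize).

#include <stdio.h>

int main(void)
{
    const int height   = 720;  /* height of one input view (assumed) */
    const int linesize = 1280; /* output luma linesize in bytes (assumed) */

    for (int interleaved = 0; interleaved <= 1; interleaved++) {
        for (int i = 0; i < 2; i++) {
            /* Start offset of view i in the output luma plane. */
            int offset = i * linesize *
                         (interleaved + height * (1 - interleaved));
            /* Destination stride used when copying this view. */
            int stride = linesize + interleaved * linesize;
            printf("interleaved=%d view=%d offset=%7d stride=%d\n",
                   interleaved, i, offset, stride);
        }
    }
    return 0;
}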
318     if (!(s->input_views[0] && s->input_views[1]))
323         for (i = 0; i < 2; i++) {
334             stereo->type = s->format;
359         for (i = 0; i < 2; i++)
368         stereo->type = s->format;
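The two stereo->type assignments above (source lines 334 and 368) record the chosen packing on the output frames. Below is a minimal sketch of how such a tag is attached with av_stereo3d_create_side_data(); the wrapper function is illustrative and not part of the file.

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

/* Illustrative helper, not taken from vf_framepack.c. */
static int tag_packed_frame(AVFrame *out, enum AVStereo3DType type)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(out);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = type; /* e.g. AV_STEREO3D_SIDEBYSIDE or AV_STEREO3D_TOPBOTTOM */
    return 0;
}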
382     if (!s->input_views[0]) {
388     if (!s->input_views[1]) {
394     if (s->input_views[0] && s->input_views[1])
401         !s->input_views[0]) {
407         !s->input_views[1]) {
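The fragments at source lines 382-407 come from the filter's activate() callback. Below is a condensed sketch of that two-input scheduling pattern, using the ff_inlink/ff_outlink helpers named in the symbol index further down; it is a simplification for orientation, not the verbatim function.

/* Condensed sketch, not the verbatim activate() from the file. */
static int activate_sketch(AVFilterContext *ctx)
{
    FramepackContext *s   = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    /* Propagate EOF/error from the output back to both inputs. */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* Buffer one frame per input until both views are available. */
    if (!s->input_views[LEFT]) {
        ret = ff_inlink_consume_frame(ctx->inputs[LEFT], &s->input_views[LEFT]);
        if (ret < 0)
            return ret;
    }
    if (!s->input_views[RIGHT]) {
        ret = ff_inlink_consume_frame(ctx->inputs[RIGHT], &s->input_views[RIGHT]);
        if (ret < 0)
            return ret;
    }

    if (s->input_views[LEFT] && s->input_views[RIGHT])
        return try_push_frame(ctx);

    /* Otherwise forward any pending status and request the missing view. */
    FF_FILTER_FORWARD_STATUS(ctx->inputs[LEFT], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[RIGHT], outlink);
    if (ff_outlink_frame_wanted(outlink)) {
        if (!s->input_views[LEFT])
            ff_inlink_request_frame(ctx->inputs[LEFT]);
        if (!s->input_views[RIGHT])
            ff_inlink_request_frame(ctx->inputs[RIGHT]);
        return 0;
    }
    return FFERROR_NOT_READY;
}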
415 #define OFFSET(x) offsetof(FramepackContext, x)
416 #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
458 .priv_class = &framepack_class,
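For orientation (not part of the listing above): the filter defined by this file takes the left view on its first input and the right view on its second, and exposes a single format option whose values map to the AVStereo3DType entries in the symbol index below (sbs, tab, lines, columns, frameseq; sbs is the default). A typical invocation, with placeholder file names, is ffmpeg -i left.mp4 -i right.mp4 -filter_complex "[0:v][1:v]framepack=format=tab" output.mp4.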
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
#define AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_GBRAP16
AVPixelFormat
Pixel format.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static av_always_inline void spatial_frame_pack(AVFilterLink *outlink, AVFrame *dst)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define FILTER_PIXFMTS_ARRAY(array)
static const AVFilterPad framepack_outputs[]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define AV_PIX_FMT_YUVA422P9
This structure describes decoded (raw) audio or video data.
#define AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV420P10
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
static int try_push_frame(AVFilterContext *ctx)
const char * name
Filter name.
A link between two filters.
#define AV_PIX_FMT_YUVA422P10
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
#define AV_PIX_FMT_YUVA420P9
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
#define AV_PIX_FMT_GBRP14
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
void * priv
private data for use by the filter
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_GRAY16
A filter pad used for either input or output.
#define AV_PIX_FMT_YUV444P10
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void vertical_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
#define AV_PIX_FMT_YUV422P16
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AV_PIX_FMT_GBRAP10
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
#define AV_PIX_FMT_GBRAP12
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
#define AV_PIX_FMT_YUVA444P12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P16
AVFILTER_DEFINE_CLASS(framepack)
#define AV_PIX_FMT_GRAY14
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FILTER_INPUTS(array)
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
#define AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GBRP16
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
AVFrame * input_views[2]
input frames
Rational number (pair of numerator and denominator).
static void horizontal_frame_pack(AVFilterLink *outlink, AVFrame *out, int interleaved)
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const AVFilter ff_vf_framepack
int format
agreed upon media format
#define AV_PIX_FMT_YUV422P12
#define AV_NOPTS_VALUE
Undefined timestamp value.
enum AVStereo3DType format
frame pack type output
#define AV_PIX_FMT_YUV444P12
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
static int activate(AVFilterContext *ctx)
AVFilterContext * src
source filter
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AV_PIX_FMT_YUVA444P10
#define i(width, name, range_min, range_max)
static int config_output(AVFilterLink *outlink)
int w
agreed upon image width
#define AV_PIX_FMT_GBRP12
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
const char * name
Pad name.
static av_cold void framepack_uninit(AVFilterContext *ctx)
static const AVOption framepack_options[]
#define AV_PIX_FMT_YUV444P9
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_STEREO3D_COLUMNS
Views are packed per column.
enum AVStereo3DType type
How views are packed within the video.
#define AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P14
const AVPixFmtDescriptor * pix_desc
agreed pixel format
int h
agreed upon image height
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const ...
#define AV_PIX_FMT_YUVA422P12
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
enum AVStereo3DView view
Determines which views are packed.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
static const AVFilterPad framepack_inputs[]
#define FILTER_OUTPUTS(array)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
AVStereo3DType
List of possible 3D Types.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV444P14
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
#define AV_PIX_FMT_GRAY12
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14
static enum AVPixelFormat formats_supported[]