FFmpeg
spherical.h
/*
 * Copyright (c) 2016 Vittorio Giovara <vittorio.giovara@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Spherical video
 */

#ifndef AVUTIL_SPHERICAL_H
#define AVUTIL_SPHERICAL_H

#include <stddef.h>
#include <stdint.h>

/**
 * @addtogroup lavu_video
 * @{
 *
 * @defgroup lavu_video_spherical Spherical video mapping
 * @{
 */

/**
 * @addtogroup lavu_video_spherical
 * A spherical video file contains surfaces that need to be mapped onto a
 * sphere. Depending on how the frame was converted, a different distortion
 * transformation or surface recomposition function needs to be applied before
 * the video can be correctly mapped and displayed.
 */

/**
 * Projection of the video surface(s) on a sphere.
 */
enum AVSphericalProjection {
    /**
     * Video represents a sphere mapped on a flat surface using
     * equirectangular projection.
     */
    AV_SPHERICAL_EQUIRECTANGULAR,

    /**
     * Video frame is split into 6 faces of a cube, and arranged on a
     * 3x2 layout. Faces are oriented upwards for the front, left, right,
     * and back faces. The up face is oriented so the top of the face is
     * forwards and the down face is oriented so the top of the face is
     * to the back.
     */
    AV_SPHERICAL_CUBEMAP,

    /**
     * Video represents a portion of a sphere mapped on a flat surface
     * using equirectangular projection. The @ref bounding fields indicate
     * the position of the current video in a larger surface.
     */
    AV_SPHERICAL_EQUIRECTANGULAR_TILE,
};

/**
 * This structure describes how to handle spherical videos, outlining
 * information about projection, initial layout, and any other view modifier.
 *
 * @note The struct must be allocated with av_spherical_alloc() and
 * its size is not a part of the public ABI.
 */
typedef struct AVSphericalMapping {
    /**
     * Projection type.
     */
    enum AVSphericalProjection projection;

    /**
     * @name Initial orientation
     * @{
     * These fields describe additional rotations applied to the sphere after
     * the video frame is mapped onto it. The sphere is rotated around the
     * viewer, who remains stationary. The order of transformation is always
     * yaw, followed by pitch, and finally by roll.
     *
     * The coordinate system matches the one defined in OpenGL, where the
     * forward vector (z) comes out of the screen, and it is equivalent to
     * a rotation matrix of R = r_y(yaw) * r_x(pitch) * r_z(roll).
     *
     * A positive yaw rotates the portion of the sphere in front of the viewer
     * toward their right. A positive pitch rotates the portion of the sphere
     * in front of the viewer upwards. A positive roll tilts the portion of
     * the sphere in front of the viewer to the viewer's right.
     *
     * These values are exported as 16.16 fixed point.
     *
     * See this equirectangular projection as example:
     *
     * @code{.unparsed}
     *                  Yaw
     *   -180            0            180
     *  90 +-------------+-------------+  180
     *     |             |             |                  up
     * P   |             |             |                 y|  forward
     * i   |             ^             |                  |   /z
     * t 0 +-------------X-------------+  0  Roll         |  /
     * c   |             |             |                  | /
     * h   |             |             |                 0|/_____right
     *     |             |             |                        x
     * -90 +-------------+-------------+  -180
     *
     * X - the default camera center
     * ^ - the default up vector
     * @endcode
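     *
     * For example, the exported angles can be converted from 16.16 fixed
     * point back to floating-point degrees by dividing by 1 << 16
     * (a sketch; @c map is assumed to point to the mapping):
     *
     * @code{c}
     * double yaw_deg   = map->yaw   / 65536.0;
     * double pitch_deg = map->pitch / 65536.0;
     * double roll_deg  = map->roll  / 65536.0;
     * @endcode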
     */
    int32_t yaw;   ///< Rotation around the up vector [-180, 180].
    int32_t pitch; ///< Rotation around the right vector [-90, 90].
    int32_t roll;  ///< Rotation around the forward vector [-180, 180].
    /**
     * @}
     */

    /**
     * @name Bounding rectangle
     * @anchor bounding
     * @{
     * These fields indicate the location of the current tile, and where
     * it should be mapped relative to the original surface. They are
     * exported as 0.32 fixed point, and can be converted to classic
     * pixel values with av_spherical_tile_bounds().
     *
     * @code{.unparsed}
     *  +----------------+----------+
     *  |                |bound_top |
     *  |            +--------+     |
     *  | bound_left |tile    |     |
     *  +<---------->|        |<--->+bound_right
     *  |            +--------+     |
     *  |                |          |
     *  |    bound_bottom|          |
     *  +----------------+----------+
     * @endcode
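     *
     * For example, the bounds can be converted to pixel offsets with a
     * call like the following (a sketch; @c map points to the spherical
     * mapping and @c tile to the decoded frame, both assumed to be
     * available in the calling code):
     *
     * @code{c}
     * size_t bound_left, bound_top, bound_right, bound_bottom;
     * av_spherical_tile_bounds(map, tile->width, tile->height,
     *                          &bound_left, &bound_top,
     *                          &bound_right, &bound_bottom);
     * @endcode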
     *
     * If needed, the original video surface dimensions can be derived
     * by adding the current stream or frame size to the related bounds,
     * like in the following example:
     *
     * @code{c}
     * original_width  = tile->width  + bound_left + bound_right;
     * original_height = tile->height + bound_top  + bound_bottom;
     * @endcode
     *
     * @note These values are valid only for the tiled equirectangular
     * projection type (@ref AV_SPHERICAL_EQUIRECTANGULAR_TILE),
     * and should be ignored in all other cases.
     */
    uint32_t bound_left;   ///< Distance from the left edge
    uint32_t bound_top;    ///< Distance from the top edge
    uint32_t bound_right;  ///< Distance from the right edge
    uint32_t bound_bottom; ///< Distance from the bottom edge
    /**
     * @}
     */

    /**
     * Number of pixels to pad from the edge of each cube face.
     *
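     * For example, the usable size of each face in the 3x2 layout could be
     * recovered like this (a sketch; it assumes @c frame is the coded frame
     * and that @c padding pixels are reserved on every edge of every face):
     *
     * @code{c}
     * face_width  = frame->width  / 3 - 2 * map->padding;
     * face_height = frame->height / 2 - 2 * map->padding;
     * @endcode
     *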
     * @note This value is valid only for the cubemap projection type
     * (@ref AV_SPHERICAL_CUBEMAP), and should be ignored in all other
     * cases.
     */
    uint32_t padding;
} AVSphericalMapping;

/**
 * Allocate an AVSphericalMapping structure and initialize its fields to
 * default values.
 *
 * @return the newly allocated struct or NULL on failure
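 *
 * A minimal usage sketch, assuming @p size receives the size of the
 * allocated struct (error handling reduced to a NULL check):
 *
 * @code{c}
 * size_t size;
 * AVSphericalMapping *map = av_spherical_alloc(&size);
 * if (!map)
 *     return AVERROR(ENOMEM);
 * map->projection = AV_SPHERICAL_EQUIRECTANGULAR;
 * @endcode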
 */
AVSphericalMapping *av_spherical_alloc(size_t *size);

/**
 * Convert the @ref bounding fields of an AVSphericalMapping
 * from 0.32 fixed point to pixels.
 *
 * @param map    The AVSphericalMapping to read the bound values from.
 * @param width  Width of the current frame or stream.
 * @param height Height of the current frame or stream.
 * @param left   Pixels from the left edge.
 * @param top    Pixels from the top edge.
 * @param right  Pixels from the right edge.
 * @param bottom Pixels from the bottom edge.
 */
void av_spherical_tile_bounds(const AVSphericalMapping *map,
                              size_t width, size_t height,
                              size_t *left, size_t *top,
                              size_t *right, size_t *bottom);

/**
 * Provide a human-readable name of a given AVSphericalProjection.
 *
 * @param projection The input AVSphericalProjection.
 *
 * @return The name of the AVSphericalProjection, or "unknown".
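 *
 * For example, the projection could be logged like this (a sketch;
 * @c map is assumed to point to an AVSphericalMapping, and the log
 * context and level are arbitrary choices):
 *
 * @code{c}
 * av_log(NULL, AV_LOG_INFO, "projection: %s\n",
 *        av_spherical_projection_name(map->projection));
 * @endcode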
 */
const char *av_spherical_projection_name(enum AVSphericalProjection projection);

/**
 * Get the AVSphericalProjection from a human-readable name.
 *
 * @param name The input string.
 *
 * @return The AVSphericalProjection value, or -1 if not found.
 */
int av_spherical_from_name(const char *name);

/**
 * @}
 * @}
 */

#endif /* AVUTIL_SPHERICAL_H */