#include <sys/types.h>

#define QCOM_TILE_WIDTH       64
#define QCOM_TILE_HEIGHT      32
#define QCOM_TILE_SIZE        (QCOM_TILE_WIDTH * QCOM_TILE_HEIGHT)
#define QCOM_TILE_GROUP_SIZE  (4 * QCOM_TILE_SIZE)

for (i = 0; i < 3; i++) {
    height = avctx->height / 2;
    stride = (s->stride + 1) / 2;
    /* ... */
    memcpy(frame->data[i], src, height * stride);
    /* ... */
    width = avctx->width;
    /* ... */
    for (j = 0; j < height; j++) {
        memcpy(dst, src, width);
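
The fragment above copies each plane of a planar frame, either with a single memcpy when the whole plane is contiguous or row by row otherwise. A minimal self-contained sketch of that pattern follows; the helper name copy_plane() and its stride parameters are illustrative assumptions, not taken from the original source:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: copy one plane in a single block when source and
 * destination are equally packed, otherwise one row at a time. */
static void copy_plane(uint8_t *dst, size_t dst_stride,
                       const uint8_t *src, size_t src_stride,
                       size_t width, size_t height)
{
    if (dst_stride == src_stride && src_stride == width) {
        memcpy(dst, src, height * src_stride);      /* whole plane at once */
    } else {
        for (size_t j = 0; j < height; j++)         /* row by row */
            memcpy(dst + j * dst_stride, src + j * src_stride, width);
    }
}

For a YUV420P chroma plane, as in the fragment, the caller would pass height / 2 and (stride + 1) / 2.
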
for (i = 0; i < 2; i++) {
    src = data + info->offset;
    /* ... */
    height = avctx->height / 2;
    /* ... */
    memcpy(frame->data[i], src, height * s->stride);
    /* ... */
    width = avctx->width;
    /* ... */
    for (j = 0; j < height; j++) {
        memcpy(dst, src, width);
for (i = 0; i < 2; i++) {
    src = data + info->offset;
    /* ... */
    height = avctx->height / 2;
    /* ... */
    memcpy(frame->data[i], src, height * s->stride);
    /* ... */
    width = avctx->width;
    /* ... */
    for (j = 0; j < height; j++) {
        memcpy(dst, src, width);
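
Both two-plane loops above follow the same semi-planar (NV12-style) pattern: plane 0 is full-height luma and plane 1 is half-height interleaved chroma. A hedged sketch of that layout; the name copy_semiplanar() and the idea that both planes sit back to back in one source buffer are assumptions, not the original signature:

/* Hypothetical sketch of a semi-planar copy: plane 1 has half the rows of plane 0. */
static void copy_semiplanar(uint8_t *dst[2], const size_t dst_stride[2],
                            const uint8_t *src, size_t src_stride,
                            size_t width, size_t luma_height)
{
    for (int i = 0; i < 2; i++) {
        size_t height = i ? luma_height / 2 : luma_height;
        for (size_t j = 0; j < height; j++) {
            memcpy(dst[i] + j * dst_stride[i], src, width);
            src += src_stride;
        }
    }
}
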
size_t flim = x + (y & ~1) * w;
/* ... */
    flim += (x & ~3) + 2;
} else if ((h & 1) == 0 || y != (h - 1)) {
    flim += (x + 2) & ~3;
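
The fragment above is the core of a tile-index computation for the Qualcomm 64x32 tiled layout: tiles are addressed in pairs of rows, with a swizzle applied except on an odd last row. Reconstructed as a complete function for context; the name qcom_tile_pos and the opening branch condition (y & 1) are assumptions inferred from the visible closing brace and else-if:

/* Hedged reconstruction: map tile coordinates (x, y) in a grid of w x h
 * tiles to the linear tile index of the 64x32 tiled layout.
 * The (y & 1) condition is an assumption. */
static size_t qcom_tile_pos(size_t x, size_t y, size_t w, size_t h)
{
    size_t flim = x + (y & ~1) * w;           /* base index of the row pair */

    if (y & 1) {                              /* odd tile row within a pair */
        flim += (x & ~3) + 2;
    } else if ((h & 1) == 0 || y != (h - 1)) {
        flim += (x + 2) & ~3;                 /* even row, unless it is an odd last row */
    }

    return flim;
}
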
size_t linesize = frame->linesize[0];
/* ... */
const size_t tile_w_align = (tile_w + 1) & ~1;
/* ... */
for (size_t y = 0; y < tile_h_luma; y++) {
    size_t row_width = width;
    for (size_t x = 0; x < tile_w; x++) {
        size_t tile_width  = row_width;
        size_t tile_height = height;
        /* ... */
        /* chroma rows sit at half the luma row offset within the same linesize */
        size_t chroma_idx = (luma_idx / linesize) * linesize / 2 + (luma_idx % linesize);
        /* ... */
        const uint8_t *src_chroma = data + luma_size
        /* ... */
            src_chroma += QCOM_TILE_SIZE / 2;
        /* ... */
        if (tile_width > QCOM_TILE_WIDTH)
        /* ... */
        /* two luma rows are copied for every chroma row (4:2:0) */
        while (tile_height--) {
            memcpy(frame->data[0] + luma_idx, src_luma, tile_width);
            luma_idx += linesize;
            memcpy(frame->data[0] + luma_idx, src_luma, tile_width);
            luma_idx += linesize;
            memcpy(frame->data[1] + chroma_idx, src_chroma, tile_width);
            chroma_idx += linesize;
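
For context, a sketch of how such a tile index is typically turned into a byte offset before the copy loop runs. The rounding rules (tiles per row padded to an even count) and the helper name luma_tile_offset() are assumptions consistent with the QCOM_TILE_* macros and the tile_w_align line above, not the original code:

/* Hedged sketch: byte offset of the luma tile at tile coordinates (x, y),
 * assuming the qcom_tile_pos() sketch above and the QCOM_TILE_* macros. */
static size_t luma_tile_offset(size_t x, size_t y,
                               size_t frame_width, size_t frame_height)
{
    size_t tile_w       = (frame_width  - 1) / QCOM_TILE_WIDTH  + 1; /* tiles per row  */
    size_t tile_w_align = (tile_w + 1) & ~1;                         /* padded to even */
    size_t tile_h_luma  = (frame_height - 1) / QCOM_TILE_HEIGHT + 1; /* luma tile rows */

    return qcom_tile_pos(x, y, tile_w_align, tile_h_luma) * QCOM_TILE_SIZE;
}

The chroma source pointer in the fragment (data + luma_size + ...) would then apply the same index function over the chroma tile rows, offset past the padded luma area.
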