#include <sys/types.h>

#define QCOM_TILE_WIDTH 64
#define QCOM_TILE_HEIGHT 32
#define QCOM_TILE_SIZE (QCOM_TILE_WIDTH * QCOM_TILE_HEIGHT)
#define QCOM_TILE_GROUP_SIZE (4 * QCOM_TILE_SIZE)
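The macros above define the 64x32 tile geometry of the Qualcomm packed semi-planar layout. A minimal sketch of how a frame's tile grid is typically derived from them is shown next; the names tile_w, tile_w_align, tile_h_luma, tile_h_chroma and luma_size mirror the variables used further down in this listing, and the exact rounding is an assumption rather than this file's verbatim code.

/* Sketch: tile-grid dimensions for a width x height frame (assumed rounding). */
static void qcom_tile_grid_example(size_t width, size_t height)
{
    const size_t tile_w        = (width - 1) / QCOM_TILE_WIDTH + 1;   /* tiles per luma row */
    const size_t tile_w_align  = (tile_w + 1) & ~1;                   /* padded to an even tile count */
    const size_t tile_h_luma   = (height - 1) / QCOM_TILE_HEIGHT + 1;
    const size_t tile_h_chroma = (height / 2 - 1) / QCOM_TILE_HEIGHT + 1;

    /* luma area rounded up to whole 4-tile groups; chroma tiles follow it */
    size_t luma_size = tile_w_align * tile_h_luma * QCOM_TILE_SIZE;
    if (luma_size % QCOM_TILE_GROUP_SIZE)
        luma_size = (luma_size / QCOM_TILE_GROUP_SIZE + 1) * QCOM_TILE_GROUP_SIZE;

    (void)tile_h_chroma;
    (void)luma_size;
}

For a 1280x720 frame, for example, tile_w is 20 and tile_h_luma is 23, so the luma area covers 20 * 23 = 460 tiles before the chroma tiles begin.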
/* three-plane (Y, U, V) copy loop */
for (i = 0; i < 3; i++) {

/* skip the rows cropped from the top of the plane */
src += s->crop_top * s->stride;

/* chroma planes start after the slice_height luma rows */
src += s->slice_height * s->stride;

/* left crop, halved for the subsampled chroma planes */
src += (s->crop_left / 2);

/* copy the plane row by row */
for (j = 0; j < height; j++) {
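The row loop above is the slow path used when the decoder's stride differs from the AVFrame's linesize: each visible row is copied individually while both pointers advance by their own pitch. A minimal self-contained sketch of that pattern, with illustrative names that are not this file's own:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative: copy `height` rows of `width` visible bytes between two
 * buffers whose row pitches differ (decoder stride vs. AVFrame linesize). */
static void copy_plane_rows(uint8_t *dst, ptrdiff_t dst_linesize,
                            const uint8_t *src, ptrdiff_t src_stride,
                            size_t width, size_t height)
{
    for (size_t j = 0; j < height; j++) {
        memcpy(dst, src, width);  /* only the visible width, not the padding */
        src += src_stride;
        dst += dst_linesize;
    }
}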
/* two-plane (luma + interleaved chroma) copy loop */
for (i = 0; i < 2; i++) {

/* skip the rows cropped from the top of the luma plane */
src += s->crop_top * s->stride;

/* chroma plane: skip the slice_height luma rows, then the top crop */
src += s->slice_height * s->stride;
src += s->crop_top * s->stride;

/* fast path: the decoder stride matches the destination linesize */
if (frame->linesize[i] == s->stride) {

/* slow path: copy the plane row by row */
for (j = 0; j < height; j++) {
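In this semi-planar layout there are only two planes: the luma rows, then a single interleaved UV plane that begins after slice_height rows, which is why the chroma source pointer above is advanced by both the full luma plane and the crop. A small illustrative sketch of locating both planes, assuming a hypothetical buf pointer at the start of the decoder output and the same context fields:

/* Illustrative only: starting points of the two semi-planar planes. */
const uint8_t *luma_src   = buf + s->crop_top * s->stride;
const uint8_t *chroma_src = buf + s->slice_height * s->stride   /* past the luma plane */
                                + s->crop_top * s->stride;      /* top crop offset     */

For instance, with stride = 1312, slice_height = 736 and crop_top = 0, the interleaved UV plane starts 736 * 1312 = 965632 bytes into the decoder buffer.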
/* packed semi-planar variant: again one luma and one interleaved chroma plane */
for (i = 0; i < 2; i++) {

/* chroma plane offset in this packed layout */
src += (s->slice_height - s->crop_top / 2) * s->stride;
src += s->crop_top * s->stride;

/* fast path: the decoder stride matches the destination linesize */
if (frame->linesize[i] == s->stride) {

/* slow path: copy the plane row by row */
for (j = 0; j < height; j++) {
/* swizzled position of tile (x, y) within a w x h tile grid */
size_t flim = x + (y & ~1) * w;

if (y & 1) {
    flim += (x & ~3) + 2;
} else if ((h & 1) == 0 || y != (h - 1)) {
    flim += (x + 2) & ~3;
}
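Taken together, the lines above form the body of a small helper that turns a tile's grid coordinates into its position in the tiled buffer. A self-contained reconstruction sketch is shown below; the name and signature qcom_tile_pos(x, y, w, h) are an assumption based on how the luma and chroma source pointers are built in the copy loop that follows, and multiplying the result by QCOM_TILE_SIZE yields the tile's byte offset.

/* Reconstruction sketch (assumed name/signature): index of tile (x, y)
 * in a w x h grid, following the tile ordering shown above. */
static size_t qcom_tile_pos(size_t x, size_t y, size_t w, size_t h)
{
    size_t flim = x + (y & ~1) * w;          /* pairs of rows share a base */

    if (y & 1) {                             /* odd rows within the pair */
        flim += (x & ~3) + 2;
    } else if ((h & 1) == 0 || y != (h - 1)) {
        flim += (x + 2) & ~3;                /* even rows, unless it is a lone last row */
    }

    return flim;
}

For a 4x2 tile grid, for example, the resulting memory order is (0,0), (1,0), (0,1), (1,1), (2,1), (3,1), (2,0), (3,0): tiles are stored in 2x2 groups, with the second group traversed bottom row first.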
size_t linesize = frame->linesize[0];

const size_t tile_w_align = (tile_w + 1) & ~1;
for (size_t y = 0; y < tile_h_luma; y++) {
    size_t row_width = width;
    for (size_t x = 0; x < tile_w; x++) {
        size_t tile_width  = row_width;
        size_t tile_height = height;

        /* destination indices of this tile in the untiled output frame */
        size_t luma_idx   = y * QCOM_TILE_HEIGHT * linesize + x * QCOM_TILE_WIDTH;
        size_t chroma_idx = (luma_idx / linesize) * linesize / 2 + (luma_idx % linesize);

        /* source pointers of this tile inside the tiled decoder buffer */
        const uint8_t *src_luma = data
            + qcom_tile_pos(x, y, tile_w_align, tile_h_luma) * QCOM_TILE_SIZE;
        const uint8_t *src_chroma = data + luma_size
            + qcom_tile_pos(x, y / 2, tile_w_align, tile_h_chroma) * QCOM_TILE_SIZE;

        /* each pass copies two luma rows and one interleaved chroma row */
        while (tile_height--) {
            memcpy(frame->data[0] + luma_idx, src_luma, tile_width);
            src_luma += QCOM_TILE_WIDTH;
            luma_idx += linesize;

            memcpy(frame->data[0] + luma_idx, src_luma, tile_width);
            src_luma += QCOM_TILE_WIDTH;
            luma_idx += linesize;

            memcpy(frame->data[1] + chroma_idx, src_chroma, tile_width);
            src_chroma += QCOM_TILE_WIDTH;
            chroma_idx += linesize;
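The copy loop shown above is only the core of the tile walk; the extract omits the bookkeeping around it. A hedged sketch of the missing pieces, based on how tile_width, tile_height, row_width and height are initialized above: odd tile rows read the second half of a shared chroma tile, partial tiles at the right and bottom edges are clamped, and the remaining extent shrinks after each tile.

/* Sketch of the omitted bookkeeping (placement is indicative only). */
if (y & 1)
    src_chroma += QCOM_TILE_SIZE / 2;   /* odd tile rows use the second half of the chroma tile */

if (tile_width > QCOM_TILE_WIDTH)
    tile_width = QCOM_TILE_WIDTH;       /* partial tile at the right edge */
if (tile_height > QCOM_TILE_HEIGHT)
    tile_height = QCOM_TILE_HEIGHT;     /* partial tile at the bottom edge */
tile_height /= 2;                       /* the while loop writes two luma rows per pass */

row_width -= QCOM_TILE_WIDTH;           /* end of the x loop: advance across the row  */
height    -= QCOM_TILE_HEIGHT;          /* end of the y loop: advance down the frame  */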