#define OFFSET(x) offsetof(TestSourceContext, x)
#define FLAGS  AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define FLAGSR AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

#define SIZE_OPTIONS \
    { "size",     "set video size",     OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
    { "s",        "set video size",     OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\

#define COMMON_OPTIONS_NOSIZE \
    { "rate",     "set video rate",     OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },\
    { "r",        "set video rate",     OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },\
    { "duration", "set video duration", OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1}, -1, INT64_MAX, FLAGS },\
    { "d",        "set video duration", OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1}, -1, INT64_MAX, FLAGS },\
    { "sar",      "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS },

#define COMMON_OPTIONS SIZE_OPTIONS COMMON_OPTIONS_NOSIZE

#define NOSIZE_OPTIONS_OFFSET 2
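
/* Illustrative sketch (not part of the original file): how these option macros
 * are typically consumed.  "example_options" is a hypothetical name used only
 * for this example. */
static const AVOption example_options[] = {
    COMMON_OPTIONS
    { NULL }
};
/* COMMON_OPTIONS expands to the seven entries "size"/"s", "rate"/"r",
 * "duration"/"d" and "sar".  NOSIZE_OPTIONS_OFFSET (2) is the number of
 * SIZE_OPTIONS entries, so &example_options[NOSIZE_OPTIONS_OFFSET] can serve
 * as an option table for sources whose frame size is fixed rather than
 * user-settable. */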
           test->duration < 0 ? -1 : (double)test->duration/1000000,

    outlink->w = test->w;
    outlink->h = test->h;

    if (test->duration >= 0 &&

    if (test->draw_once) {
        if (test->draw_once_reset) {
            test->draw_once_reset = 0;

    frame->key_frame        = 1;
    frame->interlaced_frame = 0;

    if (!test->draw_once)
#if CONFIG_COLOR_FILTER

static const AVOption color_options[] = {

    test->fill_picture_fn = color_fill_picture;

static int color_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                                  char *res, int res_len, int flags)

    test->draw_once_reset = 1;

    .config_props    = color_config_props,

    .priv_class      = &color_class,
    .process_command = color_process_command,
#if CONFIG_HALDCLUTSRC_FILTER

static const AVOption haldclutsrc_options[] = {

    int i, j, k, x = 0, y = 0, is16bit = 0, step;

    const int w = frame->width;
    const int h = frame->height;
    const int linesize = frame->linesize[0];
    const int depth = desc->comp[0].depth;

        alpha = (1 << depth) - 1;

#define LOAD_CLUT(nbits) do {                                                   \
    uint##nbits##_t *dst = ((uint##nbits##_t *)(data + y*linesize)) + x*step;   \
    dst[rgba_map[0]] = av_clip_uint##nbits(i * scale);                          \
    dst[rgba_map[1]] = av_clip_uint##nbits(j * scale);                          \
    dst[rgba_map[2]] = av_clip_uint##nbits(k * scale);                          \
        dst[rgba_map[3]] = alpha;                                               \

#define LOAD_CLUT_PLANAR(type, nbits) do {                                      \
    type *dst = ((type *)(frame->data[2] + y*frame->linesize[2])) + x;          \
    dst[0] = av_clip_uintp2(i * scale, nbits);                                  \
    dst = ((type *)(frame->data[0] + y*frame->linesize[0])) + x;                \
    dst[0] = av_clip_uintp2(j * scale, nbits);                                  \
    dst = ((type *)(frame->data[1] + y*frame->linesize[1])) + x;                \
    dst[0] = av_clip_uintp2(k * scale, nbits);                                  \
        dst = ((type *)(frame->data[3] + y*linesize)) + x;                      \

    for (k = 0; k < level; k++) {
        for (j = 0; j < level; j++) {

        case  8: LOAD_CLUT_PLANAR(uint8_t,   8); break;
        case  9: LOAD_CLUT_PLANAR(uint16_t,  9); break;
        case 10: LOAD_CLUT_PLANAR(uint16_t, 10); break;
        case 12: LOAD_CLUT_PLANAR(uint16_t, 12); break;
        case 14: LOAD_CLUT_PLANAR(uint16_t, 14); break;
        case 16: LOAD_CLUT_PLANAR(uint16_t, 16); break;

static int haldclutsrc_config_props(AVFilterLink *outlink)

    .config_props = haldclutsrc_config_props,

    .name        = "haldclutsrc",
    .priv_class  = &haldclutsrc_class,
    .init        = haldclutsrc_init,
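
/* Standalone sketch (not in the original file): the identity Hald CLUT
 * geometry that the nested k/j/i loops above walk through, assuming the usual
 * Hald convention where a CLUT of level N is an N^3 x N^3 image holding N^2
 * quantization steps per channel. */
#include <stdio.h>

int main(void)
{
    for (int level = 2; level <= 8; level++) {
        long side  = (long)level * level * level;   /* image width == height */
        long steps = (long)level * level;           /* steps per channel     */
        printf("level %d: %ldx%ld image, %ld^3 = %ld colors\n",
               level, side, side, steps, steps * steps * steps);
    }
    return 0;
}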
#if CONFIG_NULLSRC_FILTER

    test->fill_picture_fn = nullsrc_fill_picture;

    .priv_class = &nullsrc_yuvtestsrc_class,
    .init       = nullsrc_init,
#if CONFIG_TESTSRC_FILTER

static const AVOption testsrc_options[] = {

static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, int segment_width,
                           int x, int y, int w, int h)

    dst += segment_width * (step * x + y * dst_linesize);
    w *= segment_width * step;

    for (i = 0; i < h; i++) {

static void draw_digit(int digit, uint8_t *dst, int dst_linesize,

#define LEFT_TOP_VBAR   8
#define LEFT_BOT_VBAR  16
#define RIGHT_TOP_VBAR 32
#define RIGHT_BOT_VBAR 64

    static const unsigned char masks[10] = {
        TOP_HBAR |BOT_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        TOP_HBAR|MID_HBAR|BOT_HBAR|LEFT_BOT_VBAR               |RIGHT_TOP_VBAR,
        TOP_HBAR|MID_HBAR|BOT_HBAR              |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        MID_HBAR         |LEFT_TOP_VBAR         |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR               |RIGHT_BOT_VBAR,
        TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR |RIGHT_BOT_VBAR,
        TOP_HBAR                                |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
        TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR               |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,

    unsigned mask = masks[digit];

                           segments[i].x, segments[i].y, segments[i].w, segments[i].h);
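
/* Standalone sketch (not in the original file): how the masks[] bitfield above
 * selects the parts of a seven-segment digit.  The horizontal-bar bit values
 * (1, 2, 4) are an assumption, continuing the 8/16/32/64 progression shown for
 * the vertical bars. */
#include <stdio.h>

#define TOP_HBAR        1   /* assumed */
#define MID_HBAR        2   /* assumed */
#define BOT_HBAR        4   /* assumed */
#define LEFT_TOP_VBAR   8
#define LEFT_BOT_VBAR  16
#define RIGHT_TOP_VBAR 32
#define RIGHT_BOT_VBAR 64

static void print_segments(unsigned mask)
{
    printf(" %s \n",   mask & TOP_HBAR       ? "_" : " ");
    printf("%s%s%s\n", mask & LEFT_TOP_VBAR  ? "|" : " ",
                       mask & MID_HBAR       ? "_" : " ",
                       mask & RIGHT_TOP_VBAR ? "|" : " ");
    printf("%s%s%s\n", mask & LEFT_BOT_VBAR  ? "|" : " ",
                       mask & BOT_HBAR       ? "_" : " ",
                       mask & RIGHT_BOT_VBAR ? "|" : " ");
}

int main(void)
{
    /* mask copied from masks[2] above: renders an ASCII "2" */
    print_segments(TOP_HBAR | MID_HBAR | BOT_HBAR | LEFT_BOT_VBAR | RIGHT_TOP_VBAR);
    return 0;
}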
#define GRADIENT_SIZE (6 * 256)

    int color, color_rest;
    int dquad_x, dquad_y;
    int grad, dgrad, rgrad, drgrad;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            *(p++) = icolor & 1 ? 255 : 0;
            *(p++) = icolor & 2 ? 255 : 0;
            *(p++) = icolor & 4 ? 255 : 0;
            if (color_rest >= width) {
        p0 += frame->linesize[0];

    grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
    dgrad  = GRADIENT_SIZE / width;
    drgrad = GRADIENT_SIZE % width;
    for (x = 0; x < width; x++) {
                 grad < 256 || grad >= 5 * 256 ? 255 :
                 grad >= 2 * 256 && grad < 4 * 256 ? 0 :
                 grad < 2 * 256 ? 2 * 256 - 1 - grad : grad - 4 * 256;
                 grad >= 4 * 256 ? 0 :
                 grad >= 1 * 256 && grad < 3 * 256 ? 255 :
                 grad < 1 * 256 ? grad : 4 * 256 - 1 - grad;
                 grad >= 3 * 256 && grad < 5 * 256 ? 255 :
                 grad < 3 * 256 ? grad - 2 * 256 : 6 * 256 - 1 - grad;
        if (rgrad >= GRADIENT_SIZE) {
            rgrad -= GRADIENT_SIZE;
        if (grad >= GRADIENT_SIZE)
            grad -= GRADIENT_SIZE;

    for (y = height / 8; y > 0; y--) {
        p += frame->linesize[0];

    seg_size = width / 80;
    if (seg_size >= 1 && height >= 13 * seg_size) {
        int64_t p10decimals = 1;
        for (x = 0; x < test->nb_decimals; x++)

        y = (height - seg_size * 13) / 2;
        p = data + (x*3 + y * frame->linesize[0]);
        for (i = 0; i < 8; i++) {
            p -= 3 * 8 * seg_size;
            draw_digit(second % 10, p, frame->linesize[0], seg_size);

    test->fill_picture_fn = test_fill_picture;

static const AVFilterPad avfilter_vsrc_testsrc_outputs[] = {

    .priv_class = &testsrc_class,
#if CONFIG_TESTSRC2_FILTER

static const AVOption testsrc2_options[] = {

    uint8_t rgba[4] = { (argb >> 16) & 0xFF,
                        (argb >> 24) & 0xFF, };

static uint32_t color_gradient(unsigned index)

    unsigned si = index & 0xFF, sd = 0xFF - si;
    switch (index >> 8) {
    case 0: return 0xFF0000 + (si <<  8);
    case 1: return 0x00FF00 + (sd << 16);
    case 2: return 0x00FF00 + (si <<  0);
    case 3: return 0x0000FF + (sd <<  8);
    case 4: return 0x0000FF + (si << 16);
    case 5: return 0xFF0000 + (sd <<  0);
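
/* Standalone sketch (not part of the original file): the 6*256-entry hue wheel
 * implemented by color_gradient() above, reproduced verbatim and sampled at
 * the six segment boundaries. */
#include <stdio.h>
#include <stdint.h>

static uint32_t color_gradient(unsigned index)
{
    unsigned si = index & 0xFF, sd = 0xFF - si;
    switch (index >> 8) {
    case 0: return 0xFF0000 + (si <<  8);   /* red     -> yellow  */
    case 1: return 0x00FF00 + (sd << 16);   /* yellow  -> green   */
    case 2: return 0x00FF00 + (si <<  0);   /* green   -> cyan    */
    case 3: return 0x0000FF + (sd <<  8);   /* cyan    -> blue    */
    case 4: return 0x0000FF + (si << 16);   /* blue    -> magenta */
    case 5: return 0xFF0000 + (sd <<  0);   /* magenta -> red     */
    }
    return 0;
}

int main(void)
{
    for (unsigned i = 0; i < 6 * 256; i += 256)
        printf("index %4u -> 0x%06X\n", i, (unsigned)color_gradient(i));
    return 0;
}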
                      int x0, int y0, const uint8_t *text)

    for (; *text; text++) {

    unsigned alpha = (uint32_t)s->alpha << 24;

    unsigned i, x = 0, x2;

        set_color(s, &color, ((i & 1) ? 0xFF0000 : 0) |
                             ((i & 2) ? 0x00FF00 : 0) |
                             ((i & 4) ? 0x0000FF : 0) |
                          x, 0, x2 - x, frame->height);

    unsigned x, dx, y0, y, g0, g;

    for (x = 0; x < s->w; x += dx) {
        y %= 2 * (s->h - 16);
            y = 2 * (s->h - 16) - y;

    if (s->w >= 64 && s->h >= 64) {
        int l = (FFMIN(s->w, s->h) - 32) >> 1;
        int steps = FFMAX(4, l >> 5);
        int xc = (s->w >> 2) + (s->w >> 1);
        int yc = (s->h >> 2);

        for (c = 0; c < 3; c++) {
            set_color(s, &color, (0xBBBBBB ^ (0xFF << (c << 3))) | alpha);
                 pos < 3 * l ? 3 * l - pos : 0;
            yh = pos < 1 * l ? 0 :
            for (i = 1; i <= steps; i++) {

    if (s->w >= 64 && s->h >= 64) {
        int l = (FFMIN(s->w, s->h) - 16) >> 2;
        int xc = (s->w >> 2);
        int yc = (s->h >> 2) + (s->h >> 1);

        set_color(s, &color, 0xFF808080);
                          x1, ym1, x2 - x1, ym2 - ym1);
                          xm1, y1, xm2 - xm1, y2 - y1);
                          x1, y1, x2 - x1, y2 - y1);

    for (y = ymin; y + 15 < ymax; y += 16) {
        for (x = xmin; x + 15 < xmax; x += 16) {
            for (i = 0; i < 256; i++) {
                r = r * 1664525 + 1013904223;
            set_color(s, &color, 0xFF00FF80);
                          alpha, 16, 16, 16, 3, 0, x, y);
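
/* Standalone sketch (not in the original file): the linear congruential
 * generator driving the noise squares above (r = r * 1664525 + 1013904223),
 * shown producing a few raw 32-bit values from an arbitrary seed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t r = 1;                         /* arbitrary seed for the demo */
    for (int i = 0; i < 4; i++) {
        r = r * 1664525u + 1013904223u;
        printf("0x%08X\n", (unsigned)r);
    }
    return 0;
}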
    if (s->w >= 16 && s->h >= 16) {
        unsigned w = s->w - 8;
        unsigned h = s->h - 8;

        set_color(s, &color, 0xFF8000FF);

    set_color(s, &color, 0xC0000000);
    set_color(s, &color, 0xFFFF8000);
    snprintf(buf, sizeof(buf), "%02d:%02d:%02d.%03d\n%12"PRIi64,
             time / 3600000, (time / 60000) % 60, (time / 1000) % 60,
             time % 1000, s->pts);

    s->fill_picture_fn = test2_fill_picture;

static const AVFilterPad avfilter_vsrc_testsrc2_outputs[] = {

    .config_props = test2_config_props,

    .priv_class = &testsrc2_class,
#if CONFIG_RGBTESTSRC_FILTER

static const AVOption rgbtestsrc_options[] = {

static void rgbtest_put_pixel(uint8_t *dstp[4], int dst_linesizep[4],

    uint8_t *dst = dstp[0];
    int dst_linesize = dst_linesizep[0];

    case AV_PIX_FMT_BGR444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r >> 4) << 8) | ((g >> 4) << 4) | (b >> 4); break;
    case AV_PIX_FMT_RGB444: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b >> 4) << 8) | ((g >> 4) << 4) | (r >> 4); break;
    case AV_PIX_FMT_BGR555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r>>3)<<10) | ((g>>3)<<5) | (b>>3); break;
    case AV_PIX_FMT_RGB555: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<10) | ((g>>3)<<5) | (r>>3); break;
    case AV_PIX_FMT_BGR565: ((uint16_t*)(dst + y*dst_linesize))[x] = ((r>>3)<<11) | ((g>>2)<<5) | (b>>3); break;
    case AV_PIX_FMT_RGB565: ((uint16_t*)(dst + y*dst_linesize))[x] = ((b>>3)<<11) | ((g>>2)<<5) | (r>>3); break;

        v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8));
        p = dst + 3*x + y*dst_linesize;

        v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)) + (255U << (rgba_map[A]*8));
        p = dst + 4*x + y*dst_linesize;

        p = dstp[0] + x + y * dst_linesizep[0];
        p = dstp[1] + x + y * dst_linesizep[1];
        p = dstp[2] + x + y * dst_linesizep[2];

        p16 = (uint16_t *)(dstp[0] + x*2 + y * dst_linesizep[0]);
        p16 = (uint16_t *)(dstp[1] + x*2 + y * dst_linesizep[1]);
        p16 = (uint16_t *)(dstp[2] + x*2 + y * dst_linesizep[2]);
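
/* Standalone sketch (not part of the original file): the 16-bit packings used
 * in the switch above, applied to one sample color so the bit layout is
 * visible.  The expressions are copied from the BGR444/BGR555/BGR565 cases. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned r = 200, g = 100, b = 50;                    /* arbitrary 8-bit sample */
    uint16_t p444 = ((r >> 4) <<  8) | ((g >> 4) << 4) | (b >> 4);
    uint16_t p555 = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);
    uint16_t p565 = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
    printf("444: 0x%04X  555: 0x%04X  565: 0x%04X\n",
           (unsigned)p444, (unsigned)p555, (unsigned)p565);
    return 0;
}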
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int r = 0, g = 0, b = 0;

            if      (6*y < h  ) r = c;
            else if (6*y < 2*h) g = c, b = c;
            else if (6*y < 3*h) g = c;
            else if (6*y < 4*h) r = c, b = c;
            else if (6*y < 5*h) b = c;

            rgbtest_put_pixel(frame->data, frame->linesize, x, y, r, g, b,
                              ctx->outputs[0]->format, test->rgba_map);

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int r = 0, g = 0, b = 0;

            if      (3*y < h  ) r = c;
            else if (3*y < 2*h) g = c;

            rgbtest_put_pixel(frame->data, frame->linesize, x, y, r, g, b,
                              ctx->outputs[0]->format, test->rgba_map);

    test->draw_once = 1;
    test->fill_picture_fn = test->complement ? rgbtest_fill_picture_complement : rgbtest_fill_picture;

static const AVFilterPad avfilter_vsrc_rgbtestsrc_outputs[] = {

    .config_props = rgbtest_config_props,

    .name        = "rgbtestsrc",
    .priv_class  = &rgbtestsrc_class,
    .init        = rgbtest_init,
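
/* Standalone sketch (not in the original file): which color band a given row
 * falls into for the two fill functions above -- three horizontal bands
 * (R, G, B) for the plain pattern, and six for the "complement" variant
 * (R, G+B, G, R+B, B, plus a final band assigned by the elided else branch). */
#include <stdio.h>

int main(void)
{
    int h = 240;                                 /* example frame height */
    for (int y = 0; y < h; y += 40)
        printf("y=%3d: 3-band index %d, 6-band index %d\n", y, 3 * y / h, 6 * y / h);
    return 0;
}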
#if CONFIG_YUVTESTSRC_FILTER

    const int mid = 1 << (desc->comp[0].depth - 1);
    uint8_t *ydst = frame->data[0];
    uint8_t *udst = frame->data[1];
    uint8_t *vdst = frame->data[2];
    int ylinesize = frame->linesize[0];
    int ulinesize = frame->linesize[1];
    int vlinesize = frame->linesize[2];

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (; y < frame->height; y++) {
        for (x = 0; x < w; x++) {

    const int mid = 1 << (desc->comp[0].depth - 1);
    uint16_t *ydst = (uint16_t *)frame->data[0];
    uint16_t *udst = (uint16_t *)frame->data[1];
    uint16_t *vdst = (uint16_t *)frame->data[2];
    int ylinesize = frame->linesize[0] / 2;
    int ulinesize = frame->linesize[1] / 2;
    int vlinesize = frame->linesize[2] / 2;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (; y < h; y++) {
        for (x = 0; x < w; x++) {

    for (; y < frame->height; y++) {
        for (x = 0; x < w; x++) {

    test->draw_once = 1;

    test->fill_picture_fn = desc->comp[0].depth > 8 ? yuvtest_fill_picture16 : yuvtest_fill_picture8;

static const AVFilterPad avfilter_vsrc_yuvtestsrc_outputs[] = {

    .config_props = yuvtest_config_props,

    .name        = "yuvtestsrc",
    .priv_class  = &nullsrc_yuvtestsrc_class,
    .init        = yuvtest_init,
#if CONFIG_PAL75BARS_FILTER || CONFIG_PAL100BARS_FILTER || CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER

static const uint8_t rainbow[7][4] = {
    { 180, 128, 128, 255 },
    { 162,  44, 142, 255 },
    { 131, 156,  44, 255 },
    { 112,  72,  58, 255 },
    {  84, 184, 198, 255 },
    {  65, 100, 212, 255 },
    {  35, 212, 114, 255 },

static const uint8_t rainbow100[7][4] = {
    { 235, 128, 128, 255 },
    { 210,  16, 146, 255 },
    { 170, 166,  16, 255 },
    { 145,  54,  34, 255 },
    { 106, 202, 222, 255 },
    {  81,  90, 240, 255 },
    {  41, 240, 110, 255 },

static const uint8_t rainbowhd[7][4] = {
    { 180, 128, 128, 255 },
    { 168,  44, 136, 255 },
    { 145, 147,  44, 255 },
    { 133,  63,  52, 255 },
    {  63, 193, 204, 255 },
    {  51, 109, 212, 255 },
    {  28, 212, 120, 255 },

static const uint8_t wobnair[7][4] = {
    {  35, 212, 114, 255 },
    {  19, 128, 128, 255 },
    {  84, 184, 198, 255 },
    {  19, 128, 128, 255 },
    { 131, 156,  44, 255 },
    {  19, 128, 128, 255 },
    { 180, 128, 128, 255 },

static const uint8_t white[4] = { 235, 128, 128, 255 };

static const uint8_t neg4ire[4] = {  7, 128, 128, 255 };
static const uint8_t pos4ire[4] = { 24, 128, 128, 255 };

static const uint8_t i_pixel[4] = { 57, 156,  97, 255 };
static const uint8_t q_pixel[4] = { 44, 171, 147, 255 };

static const uint8_t gray40[4] = { 104, 128, 128, 255 };
static const uint8_t gray15[4] = {  49, 128, 128, 255 };
static const uint8_t   cyan[4] = { 188, 154,  16, 255 };
static const uint8_t yellow[4] = { 219,  16, 138, 255 };
static const uint8_t   blue[4] = {  32, 240, 118, 255 };
static const uint8_t    red[4] = {  63, 102, 240, 255 };
static const uint8_t black0[4] = {  16, 128, 128, 255 };
static const uint8_t black2[4] = {  20, 128, 128, 255 };
static const uint8_t black4[4] = {  25, 128, 128, 255 };
static const uint8_t   neg2[4] = {  12, 128, 128, 255 };
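
/* Standalone sketch (not part of the original file): why the first table reads
 * as the 75% rainbow and the second as the 100% one, assuming the usual
 * limited-range 8-bit luma mapping where Y=16 is 0% and Y=235 is 100%. */
#include <stdio.h>

int main(void)
{
    int y75 = 180, y100 = 235;   /* luma of the white bar in rainbow[] and rainbow100[] above */
    printf("rainbow[0]    luma amplitude: %.1f%%\n", (y75  - 16) * 100.0 / (235 - 16));
    printf("rainbow100[0] luma amplitude: %.1f%%\n", (y100 - 16) * 100.0 / (235 - 16));
    return 0;
}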
                     int x, int y, int w, int h,

    for (plane = 0; frame->data[plane]; plane++) {
        const int c = color[plane];
        const int linesize = frame->linesize[plane];
        int i, px, py, pw, ph;

        if (plane == 1 || plane == 2) {
            px = x >> desc->log2_chroma_w;
            py = y >> desc->log2_chroma_h;

        p0 = p = frame->data[plane] + py * linesize + px;

        for (i = 1; i < ph; i++, p += linesize)
#if CONFIG_PAL75BARS_FILTER

    draw_bar(test, white, x, 0, r_w, test->h, picref);
    for (i = 1; i < 7; i++) {
        draw_bar(test, rainbow[i], x, 0, r_w, test->h, picref);
    draw_bar(test, black0, x, 0, r_w, test->h, picref);

    test->fill_picture_fn = pal75bars_fill_picture;
    test->draw_once = 1;

    .name       = "pal75bars",
    .priv_class = &palbars_class,
    .init       = pal75bars_init,
#if CONFIG_PAL100BARS_FILTER

    for (i = 0; i < 7; i++) {
        draw_bar(test, rainbow100[i], x, 0, r_w, test->h, picref);
    draw_bar(test, black0, x, 0, r_w, test->h, picref);

    test->fill_picture_fn = pal100bars_fill_picture;
    test->draw_once = 1;

    .name       = "pal100bars",
    .priv_class = &palbars_class,
    .init       = pal100bars_init,
#if CONFIG_SMPTEBARS_FILTER

    int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;

    p_h = test->h - w_h - r_h;

    for (i = 0; i < 7; i++) {
        draw_bar(test, rainbow[i], x, 0,   r_w, r_h, picref);
        draw_bar(test, wobnair[i], x, r_h, r_w, w_h, picref);

    draw_bar(test, i_pixel, x, r_h + w_h, p_w, p_h, picref);
    draw_bar(test, white,   x, r_h + w_h, p_w, p_h, picref);
    draw_bar(test, q_pixel, x, r_h + w_h, p_w, p_h, picref);
    draw_bar(test, black0,  x, r_h + w_h, tmp, p_h, picref);
    draw_bar(test, neg4ire, x, r_h + w_h, tmp, p_h, picref);
    draw_bar(test, black0,  x, r_h + w_h, tmp, p_h, picref);
    draw_bar(test, pos4ire, x, r_h + w_h, tmp, p_h, picref);
    draw_bar(test, black0,  x, r_h + w_h, test->w - x, p_h, picref);

    test->fill_picture_fn = smptebars_fill_picture;
    test->draw_once = 1;

    .name       = "smptebars",
    .priv_class = &smptebars_class,
    .init       = smptebars_init,
#if CONFIG_SMPTEHDBARS_FILTER

    int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;

    draw_bar(test, gray40, x, 0, d_w, r_h, picref);

    for (i = 0; i < 7; i++) {
        draw_bar(test, rainbowhd[i], x, 0, r_w, r_h, picref);
    draw_bar(test, gray40, x, 0, test->w - x, r_h, picref);

    draw_bar(test, cyan, 0, y, d_w, r_h, picref);
    draw_bar(test, i_pixel, x, y, r_w, r_h, picref);
    draw_bar(test, rainbowhd[0], x, y, tmp, r_h, picref);
    draw_bar(test, blue, x, y, test->w - x, r_h, picref);

    draw_bar(test, yellow, 0, y, d_w, r_h, picref);
    draw_bar(test, q_pixel, x, y, r_w, r_h, picref);

        uint8_t yramp[4] = {0};
        yramp[0] = i * 255 / tmp;

    draw_bar(test, red, x, y, test->w - x, r_h, picref);

    draw_bar(test, gray15, 0, y, d_w, test->h - y, picref);
    draw_bar(test, black0, x, y, tmp, test->h - y, picref);
    draw_bar(test, white,  x, y, tmp, test->h - y, picref);
    draw_bar(test, black0, x, y, tmp, test->h - y, picref);
    draw_bar(test, neg2,   x, y, tmp, test->h - y, picref);
    draw_bar(test, black0, x, y, tmp, test->h - y, picref);
    draw_bar(test, black2, x, y, tmp, test->h - y, picref);
    draw_bar(test, black0, x, y, tmp, test->h - y, picref);
    draw_bar(test, black4, x, y, tmp, test->h - y, picref);
    draw_bar(test, black0, x, y, r_w, test->h - y, picref);
    draw_bar(test, gray15, x, y, test->w - x, test->h - y, picref);

    test->fill_picture_fn = smptehdbars_fill_picture;
    test->draw_once = 1;

    .name       = "smptehdbars",
    .priv_class = &smptebars_class,
    .init       = smptehdbars_init,
#if CONFIG_ALLYUV_FILTER

    const int ys = frame->linesize[0];
    const int us = frame->linesize[1];
    const int vs = frame->linesize[2];

    for (y = 0; y < 4096; y++) {
        for (x = 0; x < 2048; x++) {
            frame->data[0][y * ys + x] = ((x / 8) % 256);
            frame->data[0][y * ys + 4095 - x] = ((x / 8) % 256);

        for (x = 0; x < 2048; x+=8) {
            for (j = 0; j < 8; j++) {
                frame->data[1][vs * y + x + j]        = (y%16       + (j % 8) * 16);
                frame->data[1][vs * y + 4095 - x - j] = (128 + y%16 + (j % 8) * 16);

        for (x = 0; x < 4096; x++)
            frame->data[2][y * us + x] = 256 * y / 4096;

    test->draw_once = 1;
    test->fill_picture_fn = allyuv_fill_picture;

static const AVFilterPad avfilter_vsrc_allyuv_outputs[] = {

    .priv_class = &allyuv_allrgb_class,
    .init       = allyuv_init,
#if CONFIG_ALLRGB_FILTER

    const int linesize = frame->linesize[0];

    for (y = 0; y < 4096; y++) {
        uint8_t *dst = line;

        for (x = 0; x < 4096; x++) {
            *dst++ = (x >> 8) | ((y >> 8) << 4);

    test->draw_once = 1;
    test->fill_picture_fn = allrgb_fill_picture;

static const AVFilterPad avfilter_vsrc_allrgb_outputs[] = {

    .config_props = allrgb_config_props,

    .priv_class = &allyuv_allrgb_class,
    .init       = allrgb_init,
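
/* Standalone sketch (not in the original file): the pixel-count arithmetic
 * behind allrgb.  A 4096x4096 frame has exactly 2^24 pixels, one per possible
 * 24-bit RGB triple, and the byte computed above packs the high four bits of
 * x and y into a single channel. */
#include <stdio.h>

int main(void)
{
    long pixels = 4096L * 4096L;
    printf("pixels = %ld, 2^24 = %ld\n", pixels, 1L << 24);

    int x = 0xABC, y = 0x123;                         /* arbitrary coordinates */
    printf("(x >> 8) | ((y >> 8) << 4) = 0x%02X\n", (unsigned)((x >> 8) | ((y >> 8) << 4)));
    return 0;
}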
#if CONFIG_COLORSPECTRUM_FILTER

static const AVOption colorspectrum_options[] = {

static inline float mix(float a, float b, float mix)

static void hsb2rgb(const float *c, float *rgb)

    const float w = frame->width - 1.f;
    const float h = frame->height - 1.f;

    for (int y = 0; y < frame->height; y++) {
        float *r = (float *)(frame->data[2] + y * frame->linesize[2]);
        float *g = (float *)(frame->data[0] + y * frame->linesize[0]);
        float *b = (float *)(frame->data[1] + y * frame->linesize[1]);
        const float yh = y / h;

        c[1] = test->type == 2 ? yh > 0.5f ? 2.f * (yh - 0.5f) : 1.f - 2.f * yh : test->type == 1 ? 1.f - yh : yh;
        c[3] = test->type == 1 ? 1.f : test->type == 2 ? (yh > 0.5f ? 0.f : 1.f) : 0.f;
        for (int x = 0; x < frame->width; x++) {

    test->draw_once = 1;
    test->fill_picture_fn = colorspectrum_fill_picture;

static const AVFilterPad avfilter_vsrc_colorspectrum_outputs[] = {

    .name        = "colorspectrum",
    .priv_class  = &colorspectrum_class,
    .init        = colorspectrum_init,
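
/* Standalone sketch (not part of the original file): the vertical saturation
 * ramp computed by the c[1] expression above, evaluated for the three "type"
 * values at a few normalized row positions yh. */
#include <stdio.h>

int main(void)
{
    for (int type = 0; type <= 2; type++) {
        printf("type %d:", type);
        for (float yh = 0.f; yh <= 1.f; yh += 0.25f) {
            float sat = type == 2 ? (yh > 0.5f ? 2.f * (yh - 0.5f) : 1.f - 2.f * yh)
                                  : type == 1 ? 1.f - yh : yh;
            printf(" %.2f", sat);
        }
        printf("\n");
    }
    return 0;
}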