FFmpeg
vsrc_mptestsrc.c
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * MP test source, ported from MPlayer libmpcodecs/vf_test.c
 */
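
/*
 * Example usage (illustrative, not part of the original source): in a normal
 * FFmpeg build this source is reachable through the lavfi input device, so
 * the options declared in mptestsrc_options[] below can be exercised from the
 * command line, e.g.
 *
 *     ffmpeg -f lavfi -i "mptestsrc=t=dc_luma:rate=25:d=5" dc_luma.nut
 *
 * "t"/"test" selects one of the patterns implemented in this file,
 * "r"/"rate" sets the frame rate and "d"/"duration" the stream length.
 */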

#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
#include "video.h"

#define WIDTH 512
#define HEIGHT 512

enum test_type {
    TEST_DC_LUMA,
    TEST_DC_CHROMA,
    TEST_FREQ_LUMA,
    TEST_FREQ_CHROMA,
    TEST_AMP_LUMA,
    TEST_AMP_CHROMA,
    TEST_CBP,
    TEST_MV,
    TEST_RING1,
    TEST_RING2,
    TEST_ALL,
    TEST_NB
};

typedef struct MPTestContext {
    const AVClass *class;
    AVRational frame_rate;
    int64_t pts, max_pts, duration;
    int hsub, vsub;
    int test;           ///< test_type
} MPTestContext;

#define OFFSET(x) offsetof(MPTestContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mptestsrc_options[]= {
    { "rate",     "set video rate",     OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",        "set video rate",     OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "duration", "set video duration", OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},  -1, INT64_MAX, FLAGS },
    { "d",        "set video duration", OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},  -1, INT64_MAX, FLAGS },

    { "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
    { "t",    "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
    { "dc_luma",     "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA},     INT_MIN, INT_MAX, FLAGS, "test" },
    { "dc_chroma",   "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA},   INT_MIN, INT_MAX, FLAGS, "test" },
    { "freq_luma",   "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA},   INT_MIN, INT_MAX, FLAGS, "test" },
    { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
    { "amp_luma",    "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA},    INT_MIN, INT_MAX, FLAGS, "test" },
    { "amp_chroma",  "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA},  INT_MIN, INT_MAX, FLAGS, "test" },
    { "cbp",         "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP},         INT_MIN, INT_MAX, FLAGS, "test" },
    { "mv",          "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV},          INT_MIN, INT_MAX, FLAGS, "test" },
    { "ring1",       "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1},       INT_MIN, INT_MAX, FLAGS, "test" },
    { "ring2",       "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2},       INT_MIN, INT_MAX, FLAGS, "test" },
    { "all",         "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL},         INT_MIN, INT_MAX, FLAGS, "test" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mptestsrc);

/* 8-point DCT basis coefficients, filled once by init_idct() */
static double c[64];

static void init_idct(void)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        double s = i == 0 ? sqrt(0.125) : 0.5;

        for (j = 0; j < 8; j++)
            c[i*8+j] = s*cos((M_PI/8.0)*i*(j+0.5));
    }
}

/* Reference floating-point 8x8 inverse DCT: transform rows, then columns. */
static void idct(uint8_t *dst, int dst_linesize, int src[64])
{
    int i, j, k;
    double tmp[64];

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            double sum = 0.0;

            for (k = 0; k < 8; k++)
                sum += c[k*8+j] * src[8*i+k];

            tmp[8*i+j] = sum;
        }
    }

    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++) {
            double sum = 0.0;

            for (k = 0; k < 8; k++)
                sum += c[k*8+i]*tmp[8*k+j];

            dst[dst_linesize*i + j] = av_clip_uint8(lrint(sum));
        }
    }
}

/* Fill a w x h rectangle with a constant sample value. */
static void draw_dc(uint8_t *dst, int dst_linesize, int color, int w, int h)
{
    int x, y;

    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            dst[x + y*dst_linesize] = color;
}

/* Render one 8x8 block: DCT coefficient `freq` with amplitude `amp` on top of a DC term. */
static void draw_basis(uint8_t *dst, int dst_linesize, int amp, int freq, int dc)
{
    int src[64];

    memset(src, 0, 64*sizeof(int));
    src[0] = dc;
    if (amp)
        src[freq] = amp;
    idct(dst, dst_linesize, src);
}

/* Draw the blocks of a 16x16 macroblock selected by the coded block pattern:
 * bits 0-3 are the four 8x8 luma blocks, bits 4 and 5 the Cb and Cr blocks. */
static void draw_cbp(uint8_t *dst[3], int dst_linesize[3], int cbp, int amp, int dc)
{
    if (cbp&1)  draw_basis(dst[0]                    , dst_linesize[0], amp, 1, dc);
    if (cbp&2)  draw_basis(dst[0]+8                  , dst_linesize[0], amp, 1, dc);
    if (cbp&4)  draw_basis(dst[0]+  8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
    if (cbp&8)  draw_basis(dst[0]+8+8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
    if (cbp&16) draw_basis(dst[1]                    , dst_linesize[1], amp, 1, dc);
    if (cbp&32) draw_basis(dst[2]                    , dst_linesize[2], amp, 1, dc);
}

/* 8x8 flat blocks on a 16-pixel grid, stepping through the DC range starting at `off`. */
static void dc_test(uint8_t *dst, int dst_linesize, int w, int h, int off)
{
    const int step = FFMAX(256/(w*h/256), 1);
    int x, y, color = off;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            draw_dc(dst + x + y*dst_linesize, dst_linesize, color, 8, 8);
            color += step;
        }
    }
}

/* One 8x8 block per DCT coefficient: an 8x8 grid covering all 64 basis functions. */
static void freq_test(uint8_t *dst, int dst_linesize, int off)
{
    int x, y, freq = 0;

    for (y = 0; y < 8*16; y += 16) {
        for (x = 0; x < 8*16; x += 16) {
            draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*(96+off), freq, 128*8);
            freq++;
        }
    }
}

/* Basis function 1 drawn at steadily increasing amplitude over a 16x16 grid of blocks. */
static void amp_test(uint8_t *dst, int dst_linesize, int off)
{
    int x, y, amp = off;

    for (y = 0; y < 16*16; y += 16) {
        for (x = 0; x < 16*16; x += 16) {
            draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*amp, 1, 128*8);
            amp++;
        }
    }
}

/* One macroblock per coded block pattern value, enumerating cbp = 0..63. */
static void cbp_test(uint8_t *dst[3], int dst_linesize[3], int off)
{
    int x, y, cbp = 0;

    for (y = 0; y < 16*8; y += 16) {
        for (x = 0; x < 16*8; x += 16) {
            uint8_t *dst1[3];
            dst1[0] = dst[0] + x*2 + y*2*dst_linesize[0];
            dst1[1] = dst[1] + x   + y*  dst_linesize[1];
            dst1[2] = dst[2] + x   + y*  dst_linesize[2];

            draw_cbp(dst1, dst_linesize, cbp, (64+off)*4, 128*8);
            cbp++;
        }
    }
}

/* Horizontal ramps whose offset depends on `off` and on the row; every other
 * 16-line band is skipped and stays black. */
static void mv_test(uint8_t *dst, int dst_linesize, int off)
{
    int x, y;

    for (y = 0; y < 16*16; y++) {
        if (y&16)
            continue;
        for (x = 0; x < 16*16; x++)
            dst[x + y*dst_linesize] = x + off*8/(y/32+1);
    }
}

/* 16x16 flat blocks whose level alternates in sign on a checkerboard and grows
 * across the frame; `off` shifts the whole grid. */
static void ring1_test(uint8_t *dst, int dst_linesize, int off)
{
    int x, y, color = 0;

    for (y = off; y < 16*16; y += 16) {
        for (x = off; x < 16*16; x += 16) {
            draw_dc(dst + x + y*dst_linesize, dst_linesize, ((x+y)&16) ? color : -color, 16, 16);
            color++;
        }
    }
}

/* Concentric rings over a horizontal ramp; the ring width grows with `off`.
 * The rings are drawn white in the left half of the plane and black in the
 * copy written 256 pixels to the right. */
static void ring2_test(uint8_t *dst, int dst_linesize, int off)
{
    int x, y;

    for (y = 0; y < 16*16; y++) {
        for (x = 0; x < 16*16; x++) {
            double d = hypot(x-8*16, y-8*16);
            double r = d/20 - (int)(d/20);
            if (r < off/30.0) {
                dst[x + y*dst_linesize]     = 255;
                dst[x + y*dst_linesize+256] = 0;
            } else {
                dst[x + y*dst_linesize]     = x;
                dst[x + y*dst_linesize+256] = x;
            }
        }
    }
}

static av_cold int init(AVFilterContext *ctx)
{
    MPTestContext *test = ctx->priv;

    test->max_pts = test->duration >= 0 ?
        av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
    test->pts = 0;

    av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
           test->frame_rate.num, test->frame_rate.den,
           test->duration < 0 ? -1 : test->max_pts * av_q2d(av_inv_q(test->frame_rate)));
    init_idct();

    return 0;
}

static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MPTestContext *test = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(outlink->format);

    test->hsub = pix_desc->log2_chroma_w;
    test->vsub = pix_desc->log2_chroma_h;

    outlink->w = WIDTH;
    outlink->h = HEIGHT;
    outlink->time_base = av_inv_q(test->frame_rate);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int request_frame(AVFilterLink *outlink)
{
    MPTestContext *test = outlink->src->priv;
    AVFrame *picref;
    int w = WIDTH, h = HEIGHT,
        cw = AV_CEIL_RSHIFT(w, test->hsub), ch = AV_CEIL_RSHIFT(h, test->vsub);
    unsigned int frame = outlink->frame_count_in;
    enum test_type tt = test->test;
    int i;

    if (test->max_pts >= 0 && test->pts > test->max_pts)
        return AVERROR_EOF;
    picref = ff_get_video_buffer(outlink, w, h);
    if (!picref)
        return AVERROR(ENOMEM);
    picref->pts = test->pts++;

    // clean image
    for (i = 0; i < h; i++)
        memset(picref->data[0] + i*picref->linesize[0],   0, w);
    for (i = 0; i < ch; i++) {
        memset(picref->data[1] + i*picref->linesize[1], 128, cw);
        memset(picref->data[2] + i*picref->linesize[2], 128, cw);
    }

    if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
        tt = (frame/30)%(TEST_NB-1);

    switch (tt) {
    case TEST_DC_LUMA:     dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
    case TEST_DC_CHROMA:   dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
    case TEST_FREQ_LUMA:   freq_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
    case TEST_AMP_LUMA:    amp_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_AMP_CHROMA:  amp_test(picref->data[1], picref->linesize[1], frame%30); break;
    case TEST_CBP:         cbp_test(picref->data,    picref->linesize,    frame%30); break;
    case TEST_MV:          mv_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_RING1:       ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
    case TEST_RING2:       ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
    }

    return ff_filter_frame(outlink, picref);
}

static const AVFilterPad mptestsrc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_vsrc_mptestsrc = {
    .name          = "mptestsrc",
    .description   = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
    .priv_size     = sizeof(MPTestContext),
    .priv_class    = &mptestsrc_class,
    .init          = init,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = mptestsrc_outputs,
};
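
A minimal sketch of how this source can be driven programmatically through the
public libavfilter API (the filter name and the "t"/"rate"/"d" options come
from the listing above; the buffersink sink, the function name and the overall
graph setup are illustrative assumptions, and very old libavfilter versions
additionally need avfilter_register_all() before the lookups):

    #include <libavfilter/avfilter.h>
    #include <libavfilter/buffersink.h>
    #include <libavutil/frame.h>

    /* Pull the frames produced by mptestsrc; error checks mostly omitted. */
    static int run_mptestsrc_demo(void)
    {
        AVFilterGraph   *graph = avfilter_graph_alloc();
        AVFilterContext *src   = NULL, *sink = NULL;
        AVFrame         *frame = av_frame_alloc();

        if (!graph || !frame)
            return AVERROR(ENOMEM);

        /* "t", "rate" and "d" are the AVOptions declared in mptestsrc_options[] */
        avfilter_graph_create_filter(&src,  avfilter_get_by_name("mptestsrc"),
                                     "src",  "t=ring1:rate=25:d=2", NULL, graph);
        avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                     "sink", NULL, NULL, graph);
        avfilter_link(src, 0, sink, 0);
        avfilter_graph_config(graph, NULL);

        while (av_buffersink_get_frame(sink, frame) >= 0) {
            /* frame now holds one 512x512 YUV420P test frame */
            av_frame_unref(frame);
        }

        av_frame_free(&frame);
        avfilter_graph_free(&graph);
        return 0;
    }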