vf_spp.c
/*
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Simple post-processing filter
 *
 * This implementation is based on the algorithm described in
 * Aria Nosratinia, "Embedded Post-Processing for Enhancement of
 * Compressed Images" (1999).
 *
 * Originally written by Michael Niedermayer for the MPlayer project, and
 * ported by Clément Bœsch for FFmpeg.
 */
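/*
 * Illustrative usage (not part of the original source): the filter is applied
 * like any other libavfilter video filter, e.g. from the command line:
 *
 *     ffmpeg -i input.mp4 -vf spp=quality=6:mode=soft output.mp4
 *
 * "quality" selects how many shifted block grids are averaged (2^quality) and
 * "mode" selects the thresholding variant defined below.
 */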

#include "libavutil/emms.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "qp_table.h"
#include "vf_spp.h"
#include "video.h"

enum mode {
    MODE_HARD,
    MODE_SOFT,
    NB_MODES
};

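/* The filter owns an AVDCT context; these two callbacks expose the AVDCT
 * options as child options of the filter, so they can be set through the
 * filter's option string. */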
static const AVClass *child_class_iterate(void **iter)
{
    const AVClass *c = *iter ? NULL : avcodec_dct_get_class();
    *iter = (void*)(uintptr_t)c;
    return c;
}

static void *child_next(void *obj, void *prev)
{
    SPPContext *s = obj;
    return prev ? NULL : s->dct;
}

#define OFFSET(x) offsetof(SPPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption spp_options[] = {
    { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, TFLAGS },
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" },
        { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
        { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

static const AVClass spp_class = {
    .class_name          = "spp",
    .item_name           = av_default_item_name,
    .option              = spp_options,
    .version             = LIBAVUTIL_VERSION_INT,
    .category            = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
    .child_next          = child_next,
};

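/* 8x8 ordered-dither matrix (values 0..63) added before the final right shift
 * in store_slice_c() / store_slice16_c(), so that rounding the averaged
 * result back to the output bit depth does not introduce banding. */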
// XXX: share between filters?
DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63 },
    { 32, 16, 44, 28, 35, 19, 47, 31 },
    {  8, 56,  4, 52, 11, 59,  7, 55 },
    { 40, 24, 36, 20, 43, 27, 39, 23 },
    {  2, 50, 14, 62,  1, 49, 13, 61 },
    { 34, 18, 46, 30, 33, 17, 45, 29 },
    { 10, 58,  6, 54,  9, 57,  5, 53 },
    { 42, 26, 38, 22, 41, 25, 37, 21 },
};

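/* Block-grid shifts per quality level: a run with count = 2^quality uses
 * offset[count + i] as the (x,y) displacement of the 8x8 DCT grid for pass i
 * (see filter() below). */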
static const uint8_t offset[128][2] = {
    {0,0},                                                  // unused
    {0,0},
    {0,0}, {4,4},                                           // quality = 1
    {0,0}, {2,2}, {6,4}, {4,6},                             // quality = 2
    {0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3

    {0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
    {0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

    {0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
    {2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
    {4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
    {6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

    {0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
    {0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
    {1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
    {1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
    {0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
    {0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
    {1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
    {1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};

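/* Requantization of one DCT block: coefficients whose magnitude does not
 * exceed the qp-derived threshold (roughly qp * 16) are zeroed (hard
 * thresholding) or shrunk toward zero by the threshold (soft thresholding);
 * the DC coefficient is always kept. The output is written through the IDCT
 * permutation table. */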
static void hardthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; // FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3;

    for (i = 1; i < 64; i++) {
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            dst[j] = (level + 4) >> 3;
        }
    }
}

static void softthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; // FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3;

    for (i = 1; i < 64; i++) {
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
            else           dst[j] = (level + threshold1 + 4) >> 3;
        }
    }
}

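/* Store one slice of up to 8 lines of the 16-bit accumulation buffer to the
 * destination plane: scale by 1 << log2_scale, add the ordered dither, shift
 * back down and clamp to the output range with a branchless trick (values
 * outside the valid range are detected via the bit above the sample depth). */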
static void store_slice_c(uint8_t *dst, const int16_t *src,
                          int dst_linesize, int src_linesize,
                          int width, int height, int log2_scale,
                          const uint8_t dither[8][8])
{
    int y, x;

#define STORE(pos) do {                                                     \
    temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6;   \
    if (temp & 0x100)                                                       \
        temp = ~(temp >> 31);                                               \
    dst[x + y*dst_linesize + pos] = temp;                                   \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE(0);
            STORE(1);
            STORE(2);
            STORE(3);
            STORE(4);
            STORE(5);
            STORE(6);
            STORE(7);
        }
    }
}

static void store_slice16_c(uint16_t *dst, const int16_t *src,
                            int dst_linesize, int src_linesize,
                            int width, int height, int log2_scale,
                            const uint8_t dither[8][8], int depth)
{
    int y, x;
    unsigned int mask = -1<<depth;

#define STORE16(pos) do {                                                        \
    temp = ((src[x + y*src_linesize + pos] << log2_scale) + (d[pos]>>1)) >> 5;   \
    if (temp & mask)                                                             \
        temp = ~(temp >> 31);                                                    \
    dst[x + y*dst_linesize + pos] = temp;                                        \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE16(0);
            STORE16(1);
            STORE16(2);
            STORE16(3);
            STORE16(4);
            STORE16(5);
            STORE16(6);
            STORE16(7);
        }
    }
}

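/* Accumulate one filtered 8x8 block into the 16-bit temp buffer; overlapping
 * blocks from the shifted grids simply sum up here and are renormalized later
 * by the store_slice functions. */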
static inline void add_block(uint16_t *dst, int linesize, const int16_t block[64])
{
    int y;

    for (y = 0; y < 8; y++) {
        dst[0 + y*linesize] += block[0 + y*8];
        dst[1 + y*linesize] += block[1 + y*8];
        dst[2 + y*linesize] += block[2 + y*8];
        dst[3 + y*linesize] += block[3 + y*8];
        dst[4 + y*linesize] += block[4 + y*8];
        dst[5 + y*linesize] += block[5 + y*8];
        dst[6 + y*linesize] += block[6 + y*8];
        dst[7 + y*linesize] += block[7 + y*8];
    }
}

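/* Core per-plane filter: mirror-pad the plane by 8 pixels on every side into
 * p->src, then for each 8x8 position run FDCT -> requantize -> IDCT on
 * 2^log2_count shifted copies of the block grid, accumulate the results in
 * p->temp and write the normalized result back one slice at a time. */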
static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
                   int dst_linesize, int src_linesize, int width, int height,
                   const uint8_t *qp_table, int qp_stride, int is_luma, int depth)
{
    int x, y, i;
    const int count = 1 << p->log2_count;
    const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
    DECLARE_ALIGNED(16, uint64_t, block_align)[32];
    int16_t *block  = (int16_t *)block_align;
    int16_t *block2 = (int16_t *)(block_align + 16);
    uint16_t *psrc16 = (uint16_t*)p->src;
    const int sample_bytes = (depth+7) / 8;

    for (y = 0; y < height; y++) {
        int index = 8 + 8*linesize + y*linesize;
        memcpy(p->src + index*sample_bytes, src + y*src_linesize, width*sample_bytes);
        if (sample_bytes == 1) {
            for (x = 0; x < 8; x++) {
                p->src[index         - x - 1] = p->src[index +         x    ];
                p->src[index + width + x    ] = p->src[index + width - x - 1];
            }
        } else {
            for (x = 0; x < 8; x++) {
                psrc16[index         - x - 1] = psrc16[index +         x    ];
                psrc16[index + width + x    ] = psrc16[index + width - x - 1];
            }
        }
    }
    for (y = 0; y < 8; y++) {
        memcpy(p->src + (       7-y)*linesize * sample_bytes, p->src + (       y+8)*linesize * sample_bytes, linesize * sample_bytes);
        memcpy(p->src + (height+8+y)*linesize * sample_bytes, p->src + (height-y+7)*linesize * sample_bytes, linesize * sample_bytes);
    }

    for (y = 0; y < height + 8; y += 8) {
        memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
        for (x = 0; x < width + 8; x += 8) {
            int qp;

            if (p->qp) {
                qp = p->qp;
            } else {
                const int qps = 3 + is_luma;
                qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = FFMAX(1, ff_norm_qscale(qp, p->qscale_type));
            }
            for (i = 0; i < count; i++) {
                const int x1 = x + offset[i + count][0];
                const int y1 = y + offset[i + count][1];
                const int index = x1 + y1*linesize;
                p->dct->get_pixels_unaligned(block, p->src + sample_bytes*index, sample_bytes*linesize);
                p->dct->fdct(block);
                p->requantize(block2, block, qp, p->dct->idct_permutation);
                p->dct->idct(block2);
                add_block(p->temp + index, linesize, block2);
            }
        }
        if (y) {
            if (sample_bytes == 1) {
                p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
                               dst_linesize, linesize, width,
                               FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                               ldither);
            } else {
                store_slice16_c((uint16_t*)(dst + (y - 8) * dst_linesize), p->temp + 8 + y*linesize,
                                dst_linesize/2, linesize, width,
                                FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                                ldither, depth);
            }
        }
    }
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV444P,   AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV410P,   AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P,  AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUV444P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV420P9,
    AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_GBRP,      AV_PIX_FMT_GBRP9,     AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_NONE
};

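/* Per-link setup: select the C implementations (possibly overridden by the
 * x86 SIMD versions below), configure the embedded DCT for the input bit
 * depth, and allocate the padded temp/src buffers. */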
static int config_input(AVFilterLink *inlink)
{
    SPPContext *s = inlink->dst->priv;
    const int h = FFALIGN(inlink->h + 16, 16);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int bps = desc->comp[0].depth;

    s->store_slice = store_slice_c;
    switch (s->mode) {
    case MODE_HARD: s->requantize = hardthresh_c; break;
    case MODE_SOFT: s->requantize = softthresh_c; break;
    }

    av_opt_set_int(s->dct, "bits_per_sample", bps, 0);
    avcodec_dct_init(s->dct);

#if ARCH_X86
    ff_spp_init_x86(s);
#endif

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->temp_linesize = FFALIGN(inlink->w + 16, 16);
    s->temp = av_malloc_array(s->temp_linesize, h * sizeof(*s->temp));
    s->src  = av_malloc_array(s->temp_linesize, h * sizeof(*s->src) * 2);

    if (!s->temp || !s->src)
        return AVERROR(ENOMEM);
    return 0;
}

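/* Per-frame entry point: extract (or reuse) the per-macroblock QP table,
 * filter the luma plane and, if present, both chroma planes, then forward the
 * possibly re-allocated frame downstream. */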
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SPPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;
    int qp_stride = 0;
    int8_t *qp_table = NULL;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    int ret = 0;

    /* if we are not in a constant user quantizer mode and we don't want to use
     * the quantizers from the B-frames (B-frames often have a higher QP), we
     * need to save the qp table from the last non B-frame; this is what the
     * following code block does */
    if (!s->qp && (s->use_bframe_qp || in->pict_type != AV_PICTURE_TYPE_B)) {
        ret = ff_qp_table_extract(in, &qp_table, &qp_stride, NULL, &s->qscale_type);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }

        if (!s->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
            av_freep(&s->non_b_qp_table);
            s->non_b_qp_table  = qp_table;
            s->non_b_qp_stride = qp_stride;
        }
    }

    if (s->log2_count && !ctx->is_disabled) {
        if (!s->use_bframe_qp && s->non_b_qp_table) {
            qp_table  = s->non_b_qp_table;
            qp_stride = s->non_b_qp_stride;
        }

        if (qp_table || s->qp) {
            const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
            const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);

            /* get a new frame if in-place is not possible or if the dimensions
             * are not multiples of 8 */
            if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
                const int aligned_w = FFALIGN(inlink->w, 8);
                const int aligned_h = FFALIGN(inlink->h, 8);

                out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
                if (!out) {
                    av_frame_free(&in);
                    ret = AVERROR(ENOMEM);
                    goto finish;
                }
                av_frame_copy_props(out, in);
                out->width  = in->width;
                out->height = in->height;
            }

            filter(s, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1, depth);

            if (out->data[2]) {
                filter(s, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0, depth);
                filter(s, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0, depth);
            }
            emms_c();
        }
    }

    if (in != out) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    ret = ff_filter_frame(outlink, out);
finish:
    if (qp_table != s->non_b_qp_table)
        av_freep(&qp_table);
    return ret;
}

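/* Runtime command handling: "quality" (alias "level") adjusts log2_count on
 * the fly, and the special argument "max" selects the maximum level. One way
 * to send such a command (illustrative, assumed graph) is the sendcmd filter:
 *     -vf "sendcmd=c='5.0 spp quality 0',spp" */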
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    SPPContext *s = ctx->priv;

    if (!strcmp(cmd, "level") || !strcmp(cmd, "quality")) {
        if (!strcmp(args, "max"))
            s->log2_count = MAX_LEVEL;
        else
            s->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
        return 0;
    }
    return AVERROR(ENOSYS);
}

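/* The AVDCT context is allocated in preinit() so that its options can be set
 * before init; uninit() releases it together with the scratch buffers and the
 * saved non-B QP table. */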
static av_cold int preinit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    s->dct = avcodec_dct_alloc();
    if (!s->dct)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    av_freep(&s->temp);
    av_freep(&s->src);
    av_freep(&s->dct);
    av_freep(&s->non_b_qp_table);
}

static const AVFilterPad spp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_vf_spp = {
    .name            = "spp",
    .description     = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
    .priv_size       = sizeof(SPPContext),
    .preinit         = preinit,
    .uninit          = uninit,
    FILTER_INPUTS(spp_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
    .priv_class      = &spp_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};