FFmpeg
vf_pp7.c
1 /*
2  * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 /**
23  * @file
24  * Postprocessing filter - 7
25  *
26  * Originally written by Michael Niedermayer for the MPlayer
27  * project, and ported by Arwa Arif for FFmpeg.
28  */
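/* Added note (not part of the original file): pp7 is a variant of the spp
 * postprocessing filter. Roughly speaking, it runs a thresholded 7-point
 * DCT-like transform over the windows around every pixel and keeps only the
 * center sample after the inverse transform, which reduces blocking and
 * ringing in low-bitrate video. */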
29 
30 #include "libavutil/imgutils.h"
31 #include "libavutil/mem_internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "internal.h"
35 #include "qp_table.h"
36 #include "vf_pp7.h"
37 
38 enum mode {
39  MODE_HARD,
40  MODE_SOFT,
41  MODE_MEDIUM
42 };
43 
44 #define OFFSET(x) offsetof(PP7Context, x)
45 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
46 static const AVOption pp7_options[] = {
47  { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
48  { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
49  { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
50  { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
51  { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
52  { NULL }
53 };
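/* Illustrative usage (added note, not part of the original file): the options
 * above use the normal libavfilter option syntax, e.g.
 *
 *     ffmpeg -i in.mp4 -vf "pp7=qp=8:mode=soft" out.mp4
 *
 * With the default qp=0 the per-frame QP table from the decoder is used
 * instead of a forced constant quantizer (see filter_frame() below). */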
54 
55 AVFILTER_DEFINE_CLASS(pp7);
56 
57 DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
58  { 0, 48, 12, 60, 3, 51, 15, 63, },
59  { 32, 16, 44, 28, 35, 19, 47, 31, },
60  { 8, 56, 4, 52, 11, 59, 7, 55, },
61  { 40, 24, 36, 20, 43, 27, 39, 23, },
62  { 2, 50, 14, 62, 1, 49, 13, 61, },
63  { 34, 18, 46, 30, 33, 17, 45, 29, },
64  { 10, 58, 6, 54, 9, 57, 5, 53, },
65  { 42, 26, 38, 22, 41, 25, 37, 21, },
66 };
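/* Added note: the 8x8 table above is an ordered-dither matrix with values
 * 0..63; filter() adds dither[y & 7][x & 7] before the >> 6 that scales the
 * filtered value back to 8-bit range, trading banding for fine noise. */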
67 
68 #define N0 4
69 #define N1 5
70 #define N2 10
71 #define SN0 2
72 #define SN1 2.2360679775
73 #define SN2 3.16227766017
74 #define N (1 << 16)
75 
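/* Added note: N0/N1/N2 appear to be the squared norms of the transform basis
 * rows (SN0/SN1/SN2 their square roots), and factor[] below stores
 * (1 << 16) / (row_norm * col_norm) for each of the 16 coefficient positions,
 * i.e. fixed-point renormalization weights applied in the *thresh_c functions
 * before the final rounding shift. */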
76 static const int factor[16] = {
77  N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
78  N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
79  N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
80  N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
81 };
82 
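/* Added note: thres2[qp][i] is one threshold per QP value (0..98) and per
 * coefficient position, equal to roughly 4 * qp scaled by the corresponding
 * basis norms (SN0/SN2), so the comparison in the *thresh_c functions can be
 * made directly against the un-normalized transform output. */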
83 static void init_thres2(PP7Context *p)
84 {
85  int qp, i;
86  int bias = 0; //FIXME
87 
88  for (qp = 0; qp < 99; qp++) {
89  for (i = 0; i < 16; i++) {
90  p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
91  }
92  }
93 }
94 
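/* Added note: dctA_c and dctB_c below are the two separable 1-D passes of the
 * transform. Each pass folds a symmetric 7-sample window into 4 coefficients
 * with a small butterfly; dctA_c runs vertically over 7 rows for 4 adjacent
 * columns, and dctB_c then runs horizontally over 7 of those column results
 * to produce the 4x4 block that gets thresholded. */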
95 static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
96 {
97  int i;
98 
99  for (i = 0; i < 4; i++) {
100  int s0 = src[0 * stride] + src[6 * stride];
101  int s1 = src[1 * stride] + src[5 * stride];
102  int s2 = src[2 * stride] + src[4 * stride];
103  int s3 = src[3 * stride];
104  int s = s3 + s3;
105  s3 = s - s0;
106  s0 = s + s0;
107  s = s2 + s1;
108  s2 = s2 - s1;
109  dst[0] = s0 + s;
110  dst[2] = s0 - s;
111  dst[1] = 2 * s3 + s2;
112  dst[3] = s3 - 2 * s2;
113  src++;
114  dst += 4;
115  }
116 }
117 
118 static void dctB_c(int16_t *dst, int16_t *src)
119 {
120  int i;
121 
122  for (i = 0; i < 4; i++) {
123  int s0 = src[0 * 4] + src[6 * 4];
124  int s1 = src[1 * 4] + src[5 * 4];
125  int s2 = src[2 * 4] + src[4 * 4];
126  int s3 = src[3 * 4];
127  int s = s3 + s3;
128  s3 = s - s0;
129  s0 = s + s0;
130  s = s2 + s1;
131  s2 = s2 - s1;
132  dst[0 * 4] = s0 + s;
133  dst[2 * 4] = s0 - s;
134  dst[1 * 4] = 2 * s3 + s2;
135  dst[3 * 4] = s3 - 2 * s2;
136  src++;
137  dst++;
138  }
139 }
140 
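/* Added note: the three requantize callbacks below differ only in how AC
 * coefficients near the threshold are treated: hardthresh_c keeps a
 * coefficient unchanged once |level| exceeds the threshold, softthresh_c
 * shrinks it toward zero by the threshold, and mediumthresh_c ramps linearly
 * from zero at the threshold to the full value at twice the threshold.
 * The (unsigned)(level + threshold1) > threshold2 test (threshold2 being
 * 2 * threshold1) is a compact way of checking |level| > threshold1, and
 * (a + (1 << 11)) >> 12 rounds the fixed-point accumulator back down. */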
141 static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
142 {
143  int i;
144  int a;
145 
146  a = src[0] * factor[0];
147  for (i = 1; i < 16; i++) {
148  unsigned int threshold1 = p->thres2[qp][i];
149  unsigned int threshold2 = threshold1 << 1;
150  int level = src[i];
151  if (((unsigned)(level + threshold1)) > threshold2)
152  a += level * factor[i];
153  }
154  return (a + (1 << 11)) >> 12;
155 }
156 
157 static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
158 {
159  int i;
160  int a;
161 
162  a = src[0] * factor[0];
163  for (i = 1; i < 16; i++) {
164  unsigned int threshold1 = p->thres2[qp][i];
165  unsigned int threshold2 = threshold1 << 1;
166  int level = src[i];
167  if (((unsigned)(level + threshold1)) > threshold2) {
168  if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
169  a += level * factor[i];
170  else {
171  if (level > 0)
172  a += 2 * (level - (int)threshold1) * factor[i];
173  else
174  a += 2 * (level + (int)threshold1) * factor[i];
175  }
176  }
177  }
178  return (a + (1 << 11)) >> 12;
179 }
180 
181 static int softthresh_c(PP7Context *p, int16_t *src, int qp)
182 {
183  int i;
184  int a;
185 
186  a = src[0] * factor[0];
187  for (i = 1; i < 16; i++) {
188  unsigned int threshold1 = p->thres2[qp][i];
189  unsigned int threshold2 = threshold1 << 1;
190  int level = src[i];
191  if (((unsigned)(level + threshold1)) > threshold2) {
192  if (level > 0)
193  a += (level - (int)threshold1) * factor[i];
194  else
195  a += (level + (int)threshold1) * factor[i];
196  }
197  }
198  return (a + (1 << 11)) >> 12;
199 }
200 
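/* Added note on filter() below: the source plane is copied into p->src with an
 * 8-pixel mirrored border on all sides, then for every output pixel the
 * surrounding window is transformed (dctA_c is reused per group of 4 columns,
 * dctB_c runs per pixel), requantized with the per-macroblock QP (or the
 * forced qp), and finally dithered and clamped back to 8 bits. */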
201 static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
202  int dst_stride, int src_stride,
203  int width, int height,
204  uint8_t *qp_store, int qp_stride, int is_luma)
205 {
206  int x, y;
207  const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
208  uint8_t *p_src = p->src + 8 * stride;
209  int16_t *block = (int16_t *)p->src;
210  int16_t *temp = (int16_t *)(p->src + 32);
211 
212  if (!src || !dst) return;
213  for (y = 0; y < height; y++) {
214  int index = 8 + 8 * stride + y * stride;
215  memcpy(p_src + index, src + y * src_stride, width);
216  for (x = 0; x < 8; x++) {
217  p_src[index - x - 1]= p_src[index + x ];
218  p_src[index + width + x ]= p_src[index + width - x - 1];
219  }
220  }
221  for (y = 0; y < 8; y++) {
222  memcpy(p_src + ( 7 - y ) * stride, p_src + ( y + 8 ) * stride, stride);
223  memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
224  }
225  //FIXME (try edge emu)
226 
227  for (y = 0; y < height; y++) {
228  for (x = -8; x < 0; x += 4) {
229  const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
230  uint8_t *src = p_src + index;
231  int16_t *tp = temp + 4 * x;
232 
233  dctA_c(tp + 4 * 8, src, stride);
234  }
235  for (x = 0; x < width; ) {
236  const int qps = 3 + is_luma;
237  int qp;
238  int end = FFMIN(x + 8, width);
239 
240  if (p->qp)
241  qp = p->qp;
242  else {
243  qp = qp_store[ (FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
244  qp = ff_norm_qscale(qp, p->qscale_type);
245  }
246  for (; x < end; x++) {
247  const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
248  uint8_t *src = p_src + index;
249  int16_t *tp = temp + 4 * x;
250  int v;
251 
252  if ((x & 3) == 0)
253  dctA_c(tp + 4 * 8, src, stride);
254 
255  p->dctB(block, tp);
256 
257  v = p->requantize(p, block, qp);
258  v = (v + dither[y & 7][x & 7]) >> 6;
259  if ((unsigned)v > 255)
260  v = (-v) >> 31;
261  dst[x + y * dst_stride] = v;
262  }
263  }
264  }
265 }
266 
267 static const enum AVPixelFormat pix_fmts[] = {
268  AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
269  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
270  AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
271  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
272  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
273  AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8,
274  AV_PIX_FMT_NONE
275 };
276 
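/* Added note: config_input() below allocates the padded work buffer used by
 * filter() (width and height rounded up plus the 8-pixel borders), precomputes
 * the thresholds, and selects the requantize callback from the "mode" option;
 * on x86, ff_pp7_init_x86() may install SIMD versions of some routines. */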
277 static int config_input(AVFilterLink *inlink)
278 {
279  AVFilterContext *ctx = inlink->dst;
280  PP7Context *pp7 = ctx->priv;
281  const int h = FFALIGN(inlink->h + 16, 16);
282  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
283 
284  pp7->hsub = desc->log2_chroma_w;
285  pp7->vsub = desc->log2_chroma_h;
286 
287  pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
288  pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));
289 
290  if (!pp7->src)
291  return AVERROR(ENOMEM);
292 
293  init_thres2(pp7);
294 
295  switch (pp7->mode) {
296  case 0: pp7->requantize = hardthresh_c; break;
297  case 1: pp7->requantize = softthresh_c; break;
298  default:
299  case 2: pp7->requantize = mediumthresh_c; break;
300  }
301 
302  pp7->dctB = dctB_c;
303 
304 #if ARCH_X86
305  ff_pp7_init_x86(pp7);
306 #endif
307 
308  return 0;
309 }
310 
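/* Added note: filter_frame() below extracts a per-frame QP table when no
 * constant qp is forced, filters in place when the input frame is writable and
 * its dimensions are multiples of 8, and otherwise renders into a newly
 * allocated, 8-aligned output frame whose properties are copied from the
 * input. */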
311 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
312 {
313  AVFilterContext *ctx = inlink->dst;
314  PP7Context *pp7 = ctx->priv;
315  AVFilterLink *outlink = ctx->outputs[0];
316  AVFrame *out = in;
317 
318  int qp_stride = 0;
319  int8_t *qp_table = NULL;
320 
321  if (!pp7->qp) {
322  int ret = ff_qp_table_extract(in, &qp_table, &qp_stride, NULL, &pp7->qscale_type);
323  if (ret < 0) {
324  av_frame_free(&in);
325  return ret;
326  }
327  }
328 
329  if (!ctx->is_disabled) {
330  const int cw = AV_CEIL_RSHIFT(inlink->w, pp7->hsub);
331  const int ch = AV_CEIL_RSHIFT(inlink->h, pp7->vsub);
332 
333  /* get a new frame if in-place is not possible or if the dimensions
334  * are not multiple of 8 */
335  if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
336  const int aligned_w = FFALIGN(inlink->w, 8);
337  const int aligned_h = FFALIGN(inlink->h, 8);
338 
339  out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
340  if (!out) {
341  av_frame_free(&in);
342  av_freep(&qp_table);
343  return AVERROR(ENOMEM);
344  }
345  av_frame_copy_props(out, in);
346  out->width = in->width;
347  out->height = in->height;
348  }
349 
350  if (qp_table || pp7->qp) {
351 
352  filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
353  inlink->w, inlink->h, qp_table, qp_stride, 1);
354  filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
355  cw, ch, qp_table, qp_stride, 0);
356  filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
357  cw, ch, qp_table, qp_stride, 0);
358  emms_c();
359  }
360  }
361 
362  if (in != out) {
363  if (in->data[3])
364  av_image_copy_plane(out->data[3], out->linesize[3],
365  in ->data[3], in ->linesize[3],
366  inlink->w, inlink->h);
367  av_frame_free(&in);
368  }
369  av_freep(&qp_table);
370  return ff_filter_frame(outlink, out);
371 }
372 
373 static av_cold void uninit(AVFilterContext *ctx)
374 {
375  PP7Context *pp7 = ctx->priv;
376  av_freep(&pp7->src);
377 }
378 
379 static const AVFilterPad pp7_inputs[] = {
380  {
381  .name = "default",
382  .type = AVMEDIA_TYPE_VIDEO,
383  .config_props = config_input,
384  .filter_frame = filter_frame,
385  },
386 };
387 
388 static const AVFilterPad pp7_outputs[] = {
389  {
390  .name = "default",
391  .type = AVMEDIA_TYPE_VIDEO,
392  },
393 };
394 
395 const AVFilter ff_vf_pp7 = {
396  .name = "pp7",
397  .description = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
398  .priv_size = sizeof(PP7Context),
399  .uninit = uninit,
400  FILTER_INPUTS(pp7_inputs),
401  FILTER_OUTPUTS(pp7_outputs),
402  FILTER_PIXFMTS_ARRAY(pix_fmts),
403  .priv_class = &pp7_class,
404  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
405 };