FFmpeg
vf_paletteuse.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Stupeflix
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Use a palette to downsample an input video stream.
24  */
25 
26 #include "libavutil/bprint.h"
27 #include "libavutil/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/qsort.h"
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "framesync.h"
33 #include "internal.h"
34 
43 };
44 
50 };
51 
52 enum diff_mode {
56 };
57 
58 struct color_node {
61  int split;
63 };
64 
65 #define NBITS 5
66 #define CACHE_SIZE (1<<(3*NBITS))
67 
68 struct cached_color {
69  uint32_t color;
71 };
72 
73 struct cache_node {
76 };
77 
78 struct PaletteUseContext;
79 
81  int x_start, int y_start, int width, int height);
82 
83 typedef struct PaletteUseContext {
84  const AVClass *class;
86  struct cache_node cache[CACHE_SIZE]; /* lookup cache */
87  struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */
88  uint32_t palette[AVPALETTE_COUNT];
89  int transparency_index; /* index in the palette of transparency. -1 if there is no transparency in the palette. */
92  int dither;
93  int new;
96  int ordered_dither[8*8];
97  int diff_mode;
100 
101  /* debug options */
105  uint64_t total_mean_err;
108 
#define OFFSET(x) offsetof(PaletteUseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* User-facing options of the filter (dithering, frame-diff mode, alpha
 * threshold, ...) followed by debug-only knobs that are explicitly not part
 * of the official API. */
static const AVOption paletteuse_options[] = {
    { "dither", "select dithering mode", OFFSET(dither), AV_OPT_TYPE_INT, {.i64=DITHERING_SIERRA2_4A}, 0, NB_DITHERING-1, FLAGS, "dithering_mode" },
    { "bayer", "ordered 8x8 bayer dithering (deterministic)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_BAYER}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { "heckbert", "dithering as defined by Paul Heckbert in 1982 (simple error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_HECKBERT}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { "floyd_steinberg", "Floyd and Steingberg dithering (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_FLOYD_STEINBERG}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { "sierra2", "Frankie Sierra dithering v2 (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_SIERRA2}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { "sierra2_4a", "Frankie Sierra dithering v2 \"Lite\" (error diffusion)", 0, AV_OPT_TYPE_CONST, {.i64=DITHERING_SIERRA2_4A}, INT_MIN, INT_MAX, FLAGS, "dithering_mode" },
    { "bayer_scale", "set scale for bayer dithering", OFFSET(bayer_scale), AV_OPT_TYPE_INT, {.i64=2}, 0, 5, FLAGS },
    { "diff_mode", "set frame difference mode", OFFSET(diff_mode), AV_OPT_TYPE_INT, {.i64=DIFF_MODE_NONE}, 0, NB_DIFF_MODE-1, FLAGS, "diff_mode" },
    { "rectangle", "process smallest different rectangle", 0, AV_OPT_TYPE_CONST, {.i64=DIFF_MODE_RECTANGLE}, INT_MIN, INT_MAX, FLAGS, "diff_mode" },
    { "new", "take new palette for each output frame", OFFSET(new), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "alpha_threshold", "set the alpha threshold for transparency", OFFSET(trans_thresh), AV_OPT_TYPE_INT, {.i64=128}, 0, 255, FLAGS },

    /* following are the debug options, not part of the official API */
    { "debug_kdtree", "save Graphviz graph of the kdtree in specified file", OFFSET(dot_filename), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "color_search", "set reverse colormap color search method", OFFSET(color_search_method), AV_OPT_TYPE_INT, {.i64=COLOR_SEARCH_NNS_ITERATIVE}, 0, NB_COLOR_SEARCHES-1, FLAGS, "search" },
    { "nns_iterative", "iterative search", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_NNS_ITERATIVE}, INT_MIN, INT_MAX, FLAGS, "search" },
    { "nns_recursive", "recursive search", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_NNS_RECURSIVE}, INT_MIN, INT_MAX, FLAGS, "search" },
    { "bruteforce", "brute-force into the palette", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_SEARCH_BRUTEFORCE}, INT_MIN, INT_MAX, FLAGS, "search" },
    { "mean_err", "compute and print mean error", OFFSET(calc_mean_err), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "debug_accuracy", "test color search accuracy", OFFSET(debug_accuracy), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { NULL }
};
134 
135 AVFILTER_DEFINE_CLASS(paletteuse);
136 
137 static int load_apply_palette(FFFrameSync *fs);
138 
140 {
141  static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
142  static const enum AVPixelFormat inpal_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
143  static const enum AVPixelFormat out_fmts[] = {AV_PIX_FMT_PAL8, AV_PIX_FMT_NONE};
144  int ret;
146  AVFilterFormats *inpal = ff_make_format_list(inpal_fmts);
148  if (!in || !inpal || !out) {
149  av_freep(&in);
150  av_freep(&inpal);
151  av_freep(&out);
152  return AVERROR(ENOMEM);
153  }
154  if ((ret = ff_formats_ref(in , &ctx->inputs[0]->out_formats)) < 0 ||
155  (ret = ff_formats_ref(inpal, &ctx->inputs[1]->out_formats)) < 0 ||
156  (ret = ff_formats_ref(out , &ctx->outputs[0]->in_formats)) < 0)
157  return ret;
158  return 0;
159 }
160 
161 static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb, int scale, int shift)
162 {
163  return av_clip_uint8( px >> 24 ) << 24
164  | av_clip_uint8((px >> 16 & 0xff) + ((er * scale) / (1<<shift))) << 16
165  | av_clip_uint8((px >> 8 & 0xff) + ((eg * scale) / (1<<shift))) << 8
166  | av_clip_uint8((px & 0xff) + ((eb * scale) / (1<<shift)));
167 }
168 
169 static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
170 {
171  // XXX: try L*a*b with CIE76 (dL*dL + da*da + db*db)
172  const int dr = c1[1] - c2[1];
173  const int dg = c1[2] - c2[2];
174  const int db = c1[3] - c2[3];
175 
176  if (c1[0] < trans_thresh && c2[0] < trans_thresh) {
177  return 0;
178  } else if (c1[0] >= trans_thresh && c2[0] >= trans_thresh) {
179  return dr*dr + dg*dg + db*db;
180  } else {
181  return 255*255 + 255*255 + 255*255;
182  }
183 }
184 
185 static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
186 {
187  int i, pal_id = -1, min_dist = INT_MAX;
188 
189  for (i = 0; i < AVPALETTE_COUNT; i++) {
190  const uint32_t c = palette[i];
191 
192  if (c >> 24 >= trans_thresh) { // ignore transparent entry
193  const uint8_t palargb[] = {
194  palette[i]>>24 & 0xff,
195  palette[i]>>16 & 0xff,
196  palette[i]>> 8 & 0xff,
197  palette[i] & 0xff,
198  };
199  const int d = diff(palargb, argb, trans_thresh);
200  if (d < min_dist) {
201  pal_id = i;
202  min_dist = d;
203  }
204  }
205  }
206  return pal_id;
207 }
208 
209 /* Recursive form, simpler but a bit slower. Kept for reference. */
211  int node_pos;
212  int dist_sqd;
213 };
214 
/**
 * Recursive nearest-neighbour search in the 3D kd-tree (kept for reference;
 * the iterative variant is the usual path). Updates *nearest with the best
 * node found so far. The "further" branch is only explored when the
 * splitting plane is closer than the current best squared distance
 * (standard kd-tree pruning).
 */
static void colormap_nearest_node(const struct color_node *map,
                                  const int node_pos,
                                  const uint8_t *target,
                                  const int trans_thresh,
                                  struct nearest_color *nearest)
{
    const struct color_node *kd = map + node_pos;
    const int s = kd->split;
    int dx, nearer_kd_id, further_kd_id;
    const uint8_t *current = kd->val;
    const int current_to_target = diff(target, current, trans_thresh);

    if (current_to_target < nearest->dist_sqd) {
        nearest->node_pos = node_pos;
        nearest->dist_sqd = current_to_target;
    }

    if (kd->left_id != -1 || kd->right_id != -1) {
        dx = target[s] - current[s];

        /* descend first on the side of the splitting plane the target is on */
        if (dx <= 0) nearer_kd_id = kd->left_id,  further_kd_id = kd->right_id;
        else         nearer_kd_id = kd->right_id, further_kd_id = kd->left_id;

        if (nearer_kd_id != -1)
            colormap_nearest_node(map, nearer_kd_id, target, trans_thresh, nearest);

        /* the other side can only hold a better match if the plane itself is
         * closer than the best distance found so far */
        if (further_kd_id != -1 && dx*dx < nearest->dist_sqd)
            colormap_nearest_node(map, further_kd_id, target, trans_thresh, nearest);
    }
}
245 
246 static av_always_inline uint8_t colormap_nearest_recursive(const struct color_node *node, const uint8_t *rgb, const int trans_thresh)
247 {
248  struct nearest_color res = {.dist_sqd = INT_MAX, .node_pos = -1};
249  colormap_nearest_node(node, 0, rgb, trans_thresh, &res);
250  return node[res.node_pos].palette_id;
251 }
252 
/* Saved state for the iterative kd-tree descent: the sibling branch still to
 * visit and the squared distance from the target to that splitting plane. */
struct stack_node {
    int color_id; /* index of the not-yet-visited ("further") child node */
    int dx2;      /* squared distance to the split plane at that level */
};
257 
/**
 * Iterative (explicit-stack) nearest-neighbour lookup in the 3D kd-tree.
 * Behaves like colormap_nearest_recursive() but avoids recursion by pushing
 * the "further" branches to revisit onto a small stack. 16 entries bound the
 * descent depth here; NOTE(review): presumably sufficient for a
 * 256-entry palette tree — confirm against colormap_insert().
 */
static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root, const uint8_t *target, const int trans_thresh)
{
    int pos = 0, best_node_id = -1, best_dist = INT_MAX, cur_color_id = 0;
    struct stack_node nodes[16];
    struct stack_node *node = &nodes[0];

    for (;;) {

        const struct color_node *kd = &root[cur_color_id];
        const uint8_t *current = kd->val;
        const int current_to_target = diff(target, current, trans_thresh);

        /* Compare current color node to the target and update our best node if
         * it's actually better. */
        if (current_to_target < best_dist) {
            best_node_id = cur_color_id;
            if (!current_to_target)
                goto end; // exact match, we can return immediately
            best_dist = current_to_target;
        }

        /* Check if it's not a leaf */
        if (kd->left_id != -1 || kd->right_id != -1) {
            const int split = kd->split;
            const int dx = target[split] - current[split];
            int nearer_kd_id, further_kd_id;

            /* Define which side is the most interesting. */
            if (dx <= 0) nearer_kd_id = kd->left_id,  further_kd_id = kd->right_id;
            else         nearer_kd_id = kd->right_id, further_kd_id = kd->left_id;

            if (nearer_kd_id != -1) {
                if (further_kd_id != -1) {
                    /* Here, both paths are defined, so we push a state for
                     * when we are going back. */
                    node->color_id = further_kd_id;
                    node->dx2 = dx*dx;
                    pos++;
                    node++;
                }
                /* We can now update current color with the most probable path
                 * (no need to create a state since there is nothing to save
                 * anymore). */
                cur_color_id = nearer_kd_id;
                continue;
            } else if (dx*dx < best_dist) {
                /* The nearest path isn't available, so there is only one path
                 * possible and it's the least probable. We enter it only if the
                 * distance from the current point to the hyper rectangle is
                 * less than our best distance. */
                cur_color_id = further_kd_id;
                continue;
            }
        }

        /* Unstack as much as we can, typically as long as the least probable
         * branch aren't actually probable. */
        do {
            if (--pos < 0)
                goto end;
            node--;
        } while (node->dx2 >= best_dist);

        /* We got a node where the least probable branch might actually contain
         * a relevant color. */
        cur_color_id = node->color_id;
    }

end:
    return root[best_node_id].palette_id;
}
329 
330 #define COLORMAP_NEAREST(search, palette, root, target, trans_thresh) \
331  search == COLOR_SEARCH_NNS_ITERATIVE ? colormap_nearest_iterative(root, target, trans_thresh) : \
332  search == COLOR_SEARCH_NNS_RECURSIVE ? colormap_nearest_recursive(root, target, trans_thresh) : \
333  colormap_nearest_bruteforce(palette, target, trans_thresh)
334 
335 /**
336  * Check if the requested color is in the cache already. If not, find it in the
337  * color tree and cache it.
338  * Note: a, r, g, and b are the components of color, but are passed as well to avoid
339  * recomputing them (they are generally computed by the caller for other uses).
340  */
343  const enum color_search_method search_method)
344 {
345  int i;
346  const uint8_t argb_elts[] = {a, r, g, b};
347  const uint8_t rhash = r & ((1<<NBITS)-1);
348  const uint8_t ghash = g & ((1<<NBITS)-1);
349  const uint8_t bhash = b & ((1<<NBITS)-1);
350  const unsigned hash = rhash<<(NBITS*2) | ghash<<NBITS | bhash;
351  struct cache_node *node = &s->cache[hash];
352  struct cached_color *e;
353 
354  // first, check for transparency
355  if (a < s->trans_thresh && s->transparency_index >= 0) {
356  return s->transparency_index;
357  }
358 
359  for (i = 0; i < node->nb_entries; i++) {
360  e = &node->entries[i];
361  if (e->color == color)
362  return e->pal_entry;
363  }
364 
365  e = av_dynarray2_add((void**)&node->entries, &node->nb_entries,
366  sizeof(*node->entries), NULL);
367  if (!e)
368  return AVERROR(ENOMEM);
369  e->color = color;
370  e->pal_entry = COLORMAP_NEAREST(search_method, s->palette, s->map, argb_elts, s->trans_thresh);
371 
372  return e->pal_entry;
373 }
374 
376  uint32_t c, int *er, int *eg, int *eb,
377  const enum color_search_method search_method)
378 {
379  const uint8_t a = c >> 24 & 0xff;
380  const uint8_t r = c >> 16 & 0xff;
381  const uint8_t g = c >> 8 & 0xff;
382  const uint8_t b = c & 0xff;
383  uint32_t dstc;
384  const int dstx = color_get(s, c, a, r, g, b, search_method);
385  if (dstx < 0)
386  return dstx;
387  dstc = s->palette[dstx];
388  *er = r - (dstc >> 16 & 0xff);
389  *eg = g - (dstc >> 8 & 0xff);
390  *eb = b - (dstc & 0xff);
391  return dstx;
392 }
393 
395  int x_start, int y_start, int w, int h,
396  enum dithering_mode dither,
397  const enum color_search_method search_method)
398 {
399  int x, y;
400  const int src_linesize = in ->linesize[0] >> 2;
401  const int dst_linesize = out->linesize[0];
402  uint32_t *src = ((uint32_t *)in ->data[0]) + y_start*src_linesize;
403  uint8_t *dst = out->data[0] + y_start*dst_linesize;
404 
405  w += x_start;
406  h += y_start;
407 
408  for (y = y_start; y < h; y++) {
409  for (x = x_start; x < w; x++) {
410  int er, eg, eb;
411 
412  if (dither == DITHERING_BAYER) {
413  const int d = s->ordered_dither[(y & 7)<<3 | (x & 7)];
414  const uint8_t a8 = src[x] >> 24 & 0xff;
415  const uint8_t r8 = src[x] >> 16 & 0xff;
416  const uint8_t g8 = src[x] >> 8 & 0xff;
417  const uint8_t b8 = src[x] & 0xff;
418  const uint8_t r = av_clip_uint8(r8 + d);
419  const uint8_t g = av_clip_uint8(g8 + d);
420  const uint8_t b = av_clip_uint8(b8 + d);
421  const int color = color_get(s, src[x], a8, r, g, b, search_method);
422 
423  if (color < 0)
424  return color;
425  dst[x] = color;
426 
427  } else if (dither == DITHERING_HECKBERT) {
428  const int right = x < w - 1, down = y < h - 1;
429  const int color = get_dst_color_err(s, src[x], &er, &eg, &eb, search_method);
430 
431  if (color < 0)
432  return color;
433  dst[x] = color;
434 
435  if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 3, 3);
436  if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 3, 3);
437  if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 2, 3);
438 
439  } else if (dither == DITHERING_FLOYD_STEINBERG) {
440  const int right = x < w - 1, down = y < h - 1, left = x > x_start;
441  const int color = get_dst_color_err(s, src[x], &er, &eg, &eb, search_method);
442 
443  if (color < 0)
444  return color;
445  dst[x] = color;
446 
447  if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 7, 4);
448  if (left && down) src[src_linesize + x - 1] = dither_color(src[src_linesize + x - 1], er, eg, eb, 3, 4);
449  if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 5, 4);
450  if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 1, 4);
451 
452  } else if (dither == DITHERING_SIERRA2) {
453  const int right = x < w - 1, down = y < h - 1, left = x > x_start;
454  const int right2 = x < w - 2, left2 = x > x_start + 1;
455  const int color = get_dst_color_err(s, src[x], &er, &eg, &eb, search_method);
456 
457  if (color < 0)
458  return color;
459  dst[x] = color;
460 
461  if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 4, 4);
462  if (right2) src[ x + 2] = dither_color(src[ x + 2], er, eg, eb, 3, 4);
463 
464  if (down) {
465  if (left2) src[ src_linesize + x - 2] = dither_color(src[ src_linesize + x - 2], er, eg, eb, 1, 4);
466  if (left) src[ src_linesize + x - 1] = dither_color(src[ src_linesize + x - 1], er, eg, eb, 2, 4);
467  if (1) src[ src_linesize + x ] = dither_color(src[ src_linesize + x ], er, eg, eb, 3, 4);
468  if (right) src[ src_linesize + x + 1] = dither_color(src[ src_linesize + x + 1], er, eg, eb, 2, 4);
469  if (right2) src[ src_linesize + x + 2] = dither_color(src[ src_linesize + x + 2], er, eg, eb, 1, 4);
470  }
471 
472  } else if (dither == DITHERING_SIERRA2_4A) {
473  const int right = x < w - 1, down = y < h - 1, left = x > x_start;
474  const int color = get_dst_color_err(s, src[x], &er, &eg, &eb, search_method);
475 
476  if (color < 0)
477  return color;
478  dst[x] = color;
479 
480  if (right) src[ x + 1] = dither_color(src[ x + 1], er, eg, eb, 2, 2);
481  if (left && down) src[src_linesize + x - 1] = dither_color(src[src_linesize + x - 1], er, eg, eb, 1, 2);
482  if ( down) src[src_linesize + x ] = dither_color(src[src_linesize + x ], er, eg, eb, 1, 2);
483 
484  } else {
485  const uint8_t a = src[x] >> 24 & 0xff;
486  const uint8_t r = src[x] >> 16 & 0xff;
487  const uint8_t g = src[x] >> 8 & 0xff;
488  const uint8_t b = src[x] & 0xff;
489  const int color = color_get(s, src[x], a, r, g, b, search_method);
490 
491  if (color < 0)
492  return color;
493  dst[x] = color;
494  }
495  }
496  src += src_linesize;
497  dst += dst_linesize;
498  }
499  return 0;
500 }
501 
#define INDENT 4
/**
 * Append one kd-tree node — and recursively its children — to the Graphviz
 * graph buffer. The label shows the node RGB value with brackets surrounding
 * the component it splits on; the node box is filled with the color itself,
 * and the font switches to white on dark colors for readability.
 */
static void disp_node(AVBPrint *buf,
                      const struct color_node *map,
                      int parent_id, int node_id,
                      int depth)
{
    const struct color_node *node = &map[node_id];
    const uint32_t fontcolor = node->val[1] > 0x50 &&
                               node->val[2] > 0x50 &&
                               node->val[3] > 0x50 ? 0 : 0xffffff;
    const int rgb_comp = node->split - 1; /* 0=R, 1=G, 2=B (val[] is ARGB) */
    /* the "[ "[rgb_comp]-style expressions index into string literals to
     * place the '[' ']' pair around the split component in the label */
    av_bprintf(buf, "%*cnode%d ["
               "label=\"%c%02X%c%02X%c%02X%c\" "
               "fillcolor=\"#%02x%02x%02x\" "
               "fontcolor=\"#%06"PRIX32"\"]\n",
               depth*INDENT, ' ', node->palette_id,
               "[ "[rgb_comp], node->val[1],
               "][ "[rgb_comp], node->val[2],
               " ]["[rgb_comp], node->val[3],
               " ]"[rgb_comp],
               node->val[1], node->val[2], node->val[3],
               fontcolor);
    if (parent_id != -1)
        av_bprintf(buf, "%*cnode%d -> node%d\n", depth*INDENT, ' ',
                   map[parent_id].palette_id, node->palette_id);
    if (node->left_id  != -1) disp_node(buf, map, node_id, node->left_id,  depth + 1);
    if (node->right_id != -1) disp_node(buf, map, node_id, node->right_id, depth + 1);
}
530 
531 // debug_kdtree=kdtree.dot -> dot -Tpng kdtree.dot > kdtree.png
532 static int disp_tree(const struct color_node *node, const char *fname)
533 {
534  AVBPrint buf;
535  FILE *f = av_fopen_utf8(fname, "w");
536 
537  if (!f) {
538  int ret = AVERROR(errno);
539  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s' for writing: %s\n",
540  fname, av_err2str(ret));
541  return ret;
542  }
543 
545 
546  av_bprintf(&buf, "digraph {\n");
547  av_bprintf(&buf, " node [style=filled fontsize=10 shape=box]\n");
548  disp_node(&buf, node, -1, 0, 0);
549  av_bprintf(&buf, "}\n");
550 
551  fwrite(buf.str, 1, buf.len, f);
552  fclose(f);
553  av_bprint_finalize(&buf, NULL);
554  return 0;
555 }
556 
557 static int debug_accuracy(const struct color_node *node, const uint32_t *palette, const int trans_thresh,
558  const enum color_search_method search_method)
559 {
560  int r, g, b, ret = 0;
561 
562  for (r = 0; r < 256; r++) {
563  for (g = 0; g < 256; g++) {
564  for (b = 0; b < 256; b++) {
565  const uint8_t argb[] = {0xff, r, g, b};
566  const int r1 = COLORMAP_NEAREST(search_method, palette, node, argb, trans_thresh);
567  const int r2 = colormap_nearest_bruteforce(palette, argb, trans_thresh);
568  if (r1 != r2) {
569  const uint32_t c1 = palette[r1];
570  const uint32_t c2 = palette[r2];
571  const uint8_t palargb1[] = { 0xff, c1>>16 & 0xff, c1>> 8 & 0xff, c1 & 0xff };
572  const uint8_t palargb2[] = { 0xff, c2>>16 & 0xff, c2>> 8 & 0xff, c2 & 0xff };
573  const int d1 = diff(palargb1, argb, trans_thresh);
574  const int d2 = diff(palargb2, argb, trans_thresh);
575  if (d1 != d2) {
577  "/!\\ %02X%02X%02X: %d ! %d (%06"PRIX32" ! %06"PRIX32") / dist: %d ! %d\n",
578  r, g, b, r1, r2, c1 & 0xffffff, c2 & 0xffffff, d1, d2);
579  ret = 1;
580  }
581  }
582  }
583  }
584  }
585  return ret;
586 }
587 
588 struct color {
589  uint32_t value;
591 };
592 
593 struct color_rect {
596 };
597 
598 typedef int (*cmp_func)(const void *, const void *);
599 
600 #define DECLARE_CMP_FUNC(name, pos) \
601 static int cmp_##name(const void *pa, const void *pb) \
602 { \
603  const struct color *a = pa; \
604  const struct color *b = pb; \
605  return (a->value >> (8 * (3 - (pos))) & 0xff) \
606  - (b->value >> (8 * (3 - (pos))) & 0xff); \
607 }
608 
613 
614 static const cmp_func cmp_funcs[] = {cmp_a, cmp_r, cmp_g, cmp_b};
615 
/**
 * Pick the next palette entry to insert into the kd-tree.
 *
 * Scans the not-yet-used, fully opaque palette entries falling inside the
 * given RGB bounding box, determines the longest color axis of that set and
 * returns the median entry along this axis (classic kd-tree construction).
 *
 * @param component output: split axis, 1=R, 2=G, 3=B (matching val[] indices)
 * @return palette index of the chosen color, or -1 if the box holds no color
 */
static int get_next_color(const uint8_t *color_used, const uint32_t *palette,
                          const int trans_thresh,
                          int *component, const struct color_rect *box)
{
    int wr, wg, wb;
    int i, longest = 0;
    unsigned nb_color = 0;
    struct color_rect ranges;
    struct color tmp_pal[256];
    cmp_func cmpf;

    /* start from an inverted range so any candidate color expands it */
    ranges.min[0] = ranges.min[1] = ranges.min[2] = 0xff;
    ranges.max[0] = ranges.max[1] = ranges.max[2] = 0x00;

    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = palette[i];
        const uint8_t a = c >> 24 & 0xff;
        const uint8_t r = c >> 16 & 0xff;
        const uint8_t g = c >>  8 & 0xff;
        const uint8_t b = c       & 0xff;

        if (a < trans_thresh) {
            continue;
        }

        /* skip already-inserted entries, non fully-opaque colors and colors
         * outside the requested box */
        if (color_used[i] || (a != 0xff) ||
            r < box->min[0] || g < box->min[1] || b < box->min[2] ||
            r > box->max[0] || g > box->max[1] || b > box->max[2])
            continue;

        if (r < ranges.min[0]) ranges.min[0] = r;
        if (g < ranges.min[1]) ranges.min[1] = g;
        if (b < ranges.min[2]) ranges.min[2] = b;

        if (r > ranges.max[0]) ranges.max[0] = r;
        if (g > ranges.max[1]) ranges.max[1] = g;
        if (b > ranges.max[2]) ranges.max[2] = b;

        tmp_pal[nb_color].value  = c;
        tmp_pal[nb_color].pal_id = i;

        nb_color++;
    }

    if (!nb_color)
        return -1;

    /* define longest axis that will be the split component */
    wr = ranges.max[0] - ranges.min[0];
    wg = ranges.max[1] - ranges.min[1];
    wb = ranges.max[2] - ranges.min[2];
    if (wr >= wg && wr >= wb) longest = 1;
    if (wg >= wr && wg >= wb) longest = 2;
    if (wb >= wr && wb >= wg) longest = 3;
    cmpf = cmp_funcs[longest]; /* longest in 1..3 selects cmp_r/cmp_g/cmp_b */
    *component = longest;

    /* sort along this axis to get median */
    AV_QSORT(tmp_pal, nb_color, struct color, cmpf);

    return tmp_pal[nb_color >> 1].pal_id;
}
678 
/**
 * Recursively build the kd-tree: insert the median color of the current box
 * as a node, mark it used, then recurse into the two sub-boxes obtained by
 * splitting along the chosen component.
 *
 * @return index of the created node in map[], or -1 if the box held no color
 */
static int colormap_insert(struct color_node *map,
                           uint8_t *color_used,
                           int *nb_used,
                           const uint32_t *palette,
                           const int trans_thresh,
                           const struct color_rect *box)
{
    uint32_t c;
    int component, cur_id;
    int node_left_id = -1, node_right_id = -1;
    struct color_node *node;
    struct color_rect box1, box2;
    const int pal_id = get_next_color(color_used, palette, trans_thresh, &component, box);

    if (pal_id < 0)
        return -1;

    /* create new node with that color */
    cur_id = (*nb_used)++;
    c = palette[pal_id];
    node = &map[cur_id];
    node->split = component;
    node->palette_id = pal_id;
    node->val[0] = c>>24 & 0xff;
    node->val[1] = c>>16 & 0xff;
    node->val[2] = c>> 8 & 0xff;
    node->val[3] = c     & 0xff;

    color_used[pal_id] = 1;

    /* get the two boxes this node creates: left box keeps values <= split
     * value, right box gets strictly greater values */
    box1 = box2 = *box;
    box1.max[component-1] = node->val[component];
    box2.min[component-1] = node->val[component] + 1;

    node_left_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box1);

    /* the right box may be empty when the split value was already the max */
    if (box2.min[component-1] <= box2.max[component-1])
        node_right_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box2);

    node->left_id  = node_left_id;
    node->right_id = node_right_id;

    return cur_id;
}
724 
/* qsort() comparator ordering palette entries by their 24-bit RGB value;
 * the alpha byte is ignored so duplicate colors end up adjacent. */
static int cmp_pal_entry(const void *a, const void *b)
{
    const int rgb_a = *(const uint32_t *)a & 0xffffff;
    const int rgb_b = *(const uint32_t *)b & 0xffffff;
    return rgb_a - rgb_b;
}
731 
733 {
734  int i, nb_used = 0;
735  uint8_t color_used[AVPALETTE_COUNT] = {0};
736  uint32_t last_color = 0;
737  struct color_rect box;
738 
739  /* disable transparent colors and dups */
740  qsort(s->palette, AVPALETTE_COUNT, sizeof(*s->palette), cmp_pal_entry);
741  // update transparency index:
742  if (s->transparency_index >= 0) {
743  for (i = 0; i < AVPALETTE_COUNT; i++) {
744  if ((s->palette[i]>>24 & 0xff) == 0) {
745  s->transparency_index = i; // we are assuming at most one transparent color in palette
746  break;
747  }
748  }
749  }
750 
751  for (i = 0; i < AVPALETTE_COUNT; i++) {
752  const uint32_t c = s->palette[i];
753  if (i != 0 && c == last_color) {
754  color_used[i] = 1;
755  continue;
756  }
757  last_color = c;
758  if (c >> 24 < s->trans_thresh) {
759  color_used[i] = 1; // ignore transparent color(s)
760  continue;
761  }
762  }
763 
764  box.min[0] = box.min[1] = box.min[2] = 0x00;
765  box.max[0] = box.max[1] = box.max[2] = 0xff;
766 
767  colormap_insert(s->map, color_used, &nb_used, s->palette, s->trans_thresh, &box);
768 
769  if (s->dot_filename)
770  disp_tree(s->map, s->dot_filename);
771 
772  if (s->debug_accuracy) {
774  av_log(NULL, AV_LOG_INFO, "Accuracy check passed\n");
775  }
776 }
777 
/**
 * Debug helper: compute and log the mean quantization error between the
 * original RGB32 frame (in1) and the palettized output (in2), both for this
 * frame (MEP) and accumulated over all frames so far (TotalMEP). Alpha is
 * forced to 0xff on both sides so only RGB differences are measured.
 */
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1,
                             const AVFrame *in2, int frame_count)
{
    int x, y;
    const uint32_t *palette = s->palette;
    uint32_t *src1 = (uint32_t *)in1->data[0];
    uint8_t  *src2 = in2->data[0];
    const int src1_linesize = in1->linesize[0] >> 2; /* linesize in 32-bit pixels */
    const int src2_linesize = in2->linesize[0];
    const float div = in1->width * in1->height * 3;  /* 3 channels per pixel */
    unsigned mean_err = 0;

    for (y = 0; y < in1->height; y++) {
        for (x = 0; x < in1->width; x++) {
            const uint32_t c1 = src1[x];
            const uint32_t c2 = palette[src2[x]]; /* palette index back to color */
            const uint8_t argb1[] = {0xff, c1 >> 16 & 0xff, c1 >> 8 & 0xff, c1 & 0xff};
            const uint8_t argb2[] = {0xff, c2 >> 16 & 0xff, c2 >> 8 & 0xff, c2 & 0xff};
            mean_err += diff(argb1, argb2, s->trans_thresh);
        }
        src1 += src1_linesize;
        src2 += src2_linesize;
    }

    s->total_mean_err += mean_err;

    av_log(NULL, AV_LOG_INFO, "MEP:%.3f TotalMEP:%.3f\n",
           mean_err / div, s->total_mean_err / (div * frame_count));
}
807 
809  const AVFrame *prv_src, const AVFrame *cur_src,
810  const AVFrame *prv_dst, AVFrame *cur_dst,
811  int *xp, int *yp, int *wp, int *hp)
812 {
813  int x_start = 0, y_start = 0;
814  int width = cur_src->width;
815  int height = cur_src->height;
816 
817  if (prv_src->data[0] && diff_mode == DIFF_MODE_RECTANGLE) {
818  int y;
819  int x_end = cur_src->width - 1,
820  y_end = cur_src->height - 1;
821  const uint32_t *prv_srcp = (const uint32_t *)prv_src->data[0];
822  const uint32_t *cur_srcp = (const uint32_t *)cur_src->data[0];
823  const uint8_t *prv_dstp = prv_dst->data[0];
824  uint8_t *cur_dstp = cur_dst->data[0];
825 
826  const int prv_src_linesize = prv_src->linesize[0] >> 2;
827  const int cur_src_linesize = cur_src->linesize[0] >> 2;
828  const int prv_dst_linesize = prv_dst->linesize[0];
829  const int cur_dst_linesize = cur_dst->linesize[0];
830 
831  /* skip common lines */
832  while (y_start < y_end && !memcmp(prv_srcp + y_start*prv_src_linesize,
833  cur_srcp + y_start*cur_src_linesize,
834  cur_src->width * 4)) {
835  memcpy(cur_dstp + y_start*cur_dst_linesize,
836  prv_dstp + y_start*prv_dst_linesize,
837  cur_dst->width);
838  y_start++;
839  }
840  while (y_end > y_start && !memcmp(prv_srcp + y_end*prv_src_linesize,
841  cur_srcp + y_end*cur_src_linesize,
842  cur_src->width * 4)) {
843  memcpy(cur_dstp + y_end*cur_dst_linesize,
844  prv_dstp + y_end*prv_dst_linesize,
845  cur_dst->width);
846  y_end--;
847  }
848 
849  height = y_end + 1 - y_start;
850 
851  /* skip common columns */
852  while (x_start < x_end) {
853  int same_column = 1;
854  for (y = y_start; y <= y_end; y++) {
855  if (prv_srcp[y*prv_src_linesize + x_start] != cur_srcp[y*cur_src_linesize + x_start]) {
856  same_column = 0;
857  break;
858  }
859  }
860  if (!same_column)
861  break;
862  x_start++;
863  }
864  while (x_end > x_start) {
865  int same_column = 1;
866  for (y = y_start; y <= y_end; y++) {
867  if (prv_srcp[y*prv_src_linesize + x_end] != cur_srcp[y*cur_src_linesize + x_end]) {
868  same_column = 0;
869  break;
870  }
871  }
872  if (!same_column)
873  break;
874  x_end--;
875  }
876  width = x_end + 1 - x_start;
877 
878  if (x_start) {
879  for (y = y_start; y <= y_end; y++)
880  memcpy(cur_dstp + y*cur_dst_linesize,
881  prv_dstp + y*prv_dst_linesize, x_start);
882  }
883  if (x_end != cur_src->width - 1) {
884  const int copy_len = cur_src->width - 1 - x_end;
885  for (y = y_start; y <= y_end; y++)
886  memcpy(cur_dstp + y*cur_dst_linesize + x_end + 1,
887  prv_dstp + y*prv_dst_linesize + x_end + 1,
888  copy_len);
889  }
890  }
891  *xp = x_start;
892  *yp = y_start;
893  *wp = width;
894  *hp = height;
895 }
896 
898 {
899  int x, y, w, h, ret;
900  AVFilterContext *ctx = inlink->dst;
901  PaletteUseContext *s = ctx->priv;
902  AVFilterLink *outlink = inlink->dst->outputs[0];
903 
904  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
905  if (!out) {
906  av_frame_free(&in);
907  *outf = NULL;
908  return AVERROR(ENOMEM);
909  }
910  av_frame_copy_props(out, in);
911 
913  s->last_out, out, &x, &y, &w, &h);
916  if (av_frame_ref(s->last_in, in) < 0 ||
917  av_frame_ref(s->last_out, out) < 0 ||
919  av_frame_free(&in);
920  av_frame_free(&out);
921  *outf = NULL;
922  return AVERROR(ENOMEM);
923  }
924 
925  ff_dlog(ctx, "%dx%d rect: (%d;%d) -> (%d,%d) [area:%dx%d]\n",
926  w, h, x, y, x+w, y+h, in->width, in->height);
927 
928  ret = s->set_frame(s, out, in, x, y, w, h);
929  if (ret < 0) {
930  av_frame_free(&out);
931  *outf = NULL;
932  return ret;
933  }
934  memcpy(out->data[1], s->palette, AVPALETTE_SIZE);
935  if (s->calc_mean_err)
936  debug_mean_error(s, in, out, inlink->frame_count_out);
937  av_frame_free(&in);
938  *outf = out;
939  return 0;
940 }
941 
942 static int config_output(AVFilterLink *outlink)
943 {
944  int ret;
945  AVFilterContext *ctx = outlink->src;
946  PaletteUseContext *s = ctx->priv;
947 
948  ret = ff_framesync_init_dualinput(&s->fs, ctx);
949  if (ret < 0)
950  return ret;
951  s->fs.opt_repeatlast = 1; // only 1 frame in the palette
952  s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;
954 
955  outlink->w = ctx->inputs[0]->w;
956  outlink->h = ctx->inputs[0]->h;
957 
958  outlink->time_base = ctx->inputs[0]->time_base;
959  if ((ret = ff_framesync_configure(&s->fs)) < 0)
960  return ret;
961  return 0;
962 }
963 
965 {
966  AVFilterContext *ctx = inlink->dst;
967 
968  if (inlink->w * inlink->h != AVPALETTE_COUNT) {
969  av_log(ctx, AV_LOG_ERROR,
970  "Palette input must contain exactly %d pixels. "
971  "Specified input has %dx%d=%d pixels\n",
972  AVPALETTE_COUNT, inlink->w, inlink->h,
973  inlink->w * inlink->h);
974  return AVERROR(EINVAL);
975  }
976  return 0;
977 }
978 
979 static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
980 {
981  int i, x, y;
982  const uint32_t *p = (const uint32_t *)palette_frame->data[0];
983  const int p_linesize = palette_frame->linesize[0] >> 2;
984 
985  s->transparency_index = -1;
986 
987  if (s->new) {
988  memset(s->palette, 0, sizeof(s->palette));
989  memset(s->map, 0, sizeof(s->map));
990  for (i = 0; i < CACHE_SIZE; i++)
991  av_freep(&s->cache[i].entries);
992  memset(s->cache, 0, sizeof(s->cache));
993  }
994 
995  i = 0;
996  for (y = 0; y < palette_frame->height; y++) {
997  for (x = 0; x < palette_frame->width; x++) {
998  s->palette[i] = p[x];
999  if (p[x]>>24 < s->trans_thresh) {
1000  s->transparency_index = i; // we are assuming at most one transparent color in palette
1001  }
1002  i++;
1003  }
1004  p += p_linesize;
1005  }
1006 
1007  load_colormap(s);
1008 
1009  if (!s->new)
1010  s->palette_loaded = 1;
1011 }
1012 
1014 {
1015  AVFilterContext *ctx = fs->parent;
1016  AVFilterLink *inlink = ctx->inputs[0];
1017  PaletteUseContext *s = ctx->priv;
1018  AVFrame *master, *second, *out = NULL;
1019  int ret;
1020 
1021  // writable for error diffusal dithering
1022  ret = ff_framesync_dualinput_get_writable(fs, &master, &second);
1023  if (ret < 0)
1024  return ret;
1025  if (!master || !second) {
1026  ret = AVERROR_BUG;
1027  goto error;
1028  }
1029  if (!s->palette_loaded) {
1030  load_palette(s, second);
1031  }
1032  ret = apply_palette(inlink, master, &out);
1033  if (ret < 0)
1034  goto error;
1035  return ff_filter_frame(ctx->outputs[0], out);
1036 
1037 error:
1038  av_frame_free(&master);
1039  return ret;
1040 }
1041 
/**
 * Define a set_frame_<name>() wrapper that binds one dithering mode (value)
 * and one color search method (color_search) to the generic set_frame()
 * implementation, so both selections are compile-time constants and can be
 * constant-folded in the hot pixel loop.
 */
#define DEFINE_SET_FRAME(color_search, name, value) \
static int set_frame_##name(PaletteUseContext *s, AVFrame *out, AVFrame *in, \
                            int x_start, int y_start, int w, int h) \
{ \
    return set_frame(s, out, in, x_start, y_start, w, h, value, color_search); \
}
1048 
/**
 * Instantiate one set_frame_<search>_<dither>() function per dithering mode
 * for a given color search method.
 */
#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##none, DITHERING_NONE) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##bayer, DITHERING_BAYER) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##heckbert, DITHERING_HECKBERT) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##floyd_steinberg, DITHERING_FLOYD_STEINBERG) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2, DITHERING_SIERRA2) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2_4a, DITHERING_SIERRA2_4A) \

1060 
/**
 * Expand to one row of the set_frame dispatch table: the per-dithering-mode
 * function pointers for a given color search method, in dithering_mode order.
 */
#define DITHERING_ENTRIES(color_search) { \
    set_frame_##color_search##_none, \
    set_frame_##color_search##_bayer, \
    set_frame_##color_search##_heckbert, \
    set_frame_##color_search##_floyd_steinberg, \
    set_frame_##color_search##_sierra2, \
    set_frame_##color_search##_sierra2_4a, \
}
1069 
1071  DITHERING_ENTRIES(nns_iterative),
1072  DITHERING_ENTRIES(nns_recursive),
1073  DITHERING_ENTRIES(bruteforce),
1074 };
1075 
/**
 * Compute one threshold entry of the 8x8 ordered (Bayer) dither matrix.
 *
 * The 6-bit index p (0..63) and q = p ^ (p >> 3) have their three low bits
 * interleaved in reversed significance, producing a value in 0..63.
 */
static int dither_value(int p)
{
    const int q = p ^ (p >> 3);
    int v = 0;

    v |= ((p >> 2) & 1) << 0;
    v |= ((q >> 2) & 1) << 1;
    v |= ((p >> 1) & 1) << 2;
    v |= ((q >> 1) & 1) << 3;
    v |= ( p       & 1) << 4;
    v |= ( q       & 1) << 5;
    return v;
}
1083 
1085 {
1086  PaletteUseContext *s = ctx->priv;
1087 
1088  s->last_in = av_frame_alloc();
1089  s->last_out = av_frame_alloc();
1090  if (!s->last_in || !s->last_out) {
1091  av_frame_free(&s->last_in);
1092  av_frame_free(&s->last_out);
1093  return AVERROR(ENOMEM);
1094  }
1095 
1096  s->set_frame = set_frame_lut[s->color_search_method][s->dither];
1097 
1098  if (s->dither == DITHERING_BAYER) {
1099  int i;
1100  const int delta = 1 << (5 - s->bayer_scale); // to avoid too much luma
1101 
1102  for (i = 0; i < FF_ARRAY_ELEMS(s->ordered_dither); i++)
1103  s->ordered_dither[i] = (dither_value(i) >> s->bayer_scale) - delta;
1104  }
1105 
1106  return 0;
1107 }
1108 
1110 {
1111  PaletteUseContext *s = ctx->priv;
1112  return ff_framesync_activate(&s->fs);
1113 }
1114 
1116 {
1117  int i;
1118  PaletteUseContext *s = ctx->priv;
1119 
1120  ff_framesync_uninit(&s->fs);
1121  for (i = 0; i < CACHE_SIZE; i++)
1122  av_freep(&s->cache[i].entries);
1123  av_frame_free(&s->last_in);
1124  av_frame_free(&s->last_out);
1125 }
1126 
1127 static const AVFilterPad paletteuse_inputs[] = {
1128  {
1129  .name = "default",
1130  .type = AVMEDIA_TYPE_VIDEO,
1131  },{
1132  .name = "palette",
1133  .type = AVMEDIA_TYPE_VIDEO,
1134  .config_props = config_input_palette,
1135  },
1136  { NULL }
1137 };
1138 
1140  {
1141  .name = "default",
1142  .type = AVMEDIA_TYPE_VIDEO,
1143  .config_props = config_output,
1144  },
1145  { NULL }
1146 };
1147 
1149  .name = "paletteuse",
1150  .description = NULL_IF_CONFIG_SMALL("Use a palette to downsample an input video stream."),
1151  .priv_size = sizeof(PaletteUseContext),
1153  .init = init,
1154  .uninit = uninit,
1155  .activate = activate,
1156  .inputs = paletteuse_inputs,
1157  .outputs = paletteuse_outputs,
1158  .priv_class = &paletteuse_class,
1159 };
diff_mode
Definition: vf_paletteuse.c:52
static av_always_inline int get_dst_color_err(PaletteUseContext *s, uint32_t c, int *er, int *eg, int *eb, const enum color_search_method search_method)
static void colormap_nearest_node(const struct color_node *map, const int node_pos, const uint8_t *target, const int trans_thresh, struct nearest_color *nearest)
uint64_t total_mean_err
AVFILTER_DEFINE_CLASS(paletteuse)
#define NULL
Definition: coverity.c:32
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
static int shift(int a, int b)
Definition: sonic.c:82
static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
static int colormap_insert(struct color_node *map, uint8_t *color_used, int *nb_used, const uint32_t *palette, const int trans_thresh, const struct color_rect *box)
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
color_search_method
Definition: vf_paletteuse.c:45
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
dithering_mode
Definition: vf_paletteuse.c:35
static int query_formats(AVFilterContext *ctx)
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
Definition: file_open.c:158
static av_always_inline uint8_t colormap_nearest_iterative(const struct color_node *root, const uint8_t *target, const int trans_thresh)
static const AVFilterPad paletteuse_outputs[]
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
static av_always_inline uint8_t colormap_nearest_recursive(const struct color_node *node, const uint8_t *rgb, const int trans_thresh)
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:117
static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb, int scale, int shift)
const char * master
Definition: vf_curves.c:117
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
static void error(const char *err)
#define src
Definition: vp8dsp.c:254
uint8_t val[4]
Definition: vf_paletteuse.c:59
static av_cold int init(AVFilterContext *ctx)
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
Definition: framesync.h:86
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
AVFilterContext * parent
Parent filter context.
Definition: framesync.h:152
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
AVFilter ff_vf_paletteuse
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
float delta
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
uint8_t pal_id
#define INDENT
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:92
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:361
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
uint32_t color
Definition: vf_paletteuse.c:69
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
Definition: framesync.c:399
static const cmp_func cmp_funcs[]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
static int config_input_palette(AVFilterLink *inlink)
FFFrameSyncIn * in
Pointer to array of inputs.
Definition: framesync.h:203
#define height
FFFrameSync fs
Definition: vf_paletteuse.c:85
static const uint64_t c1
Definition: murmur3.c:49
#define ff_dlog(a,...)
int(* set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int width, int height)
Definition: vf_paletteuse.c:80
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
Definition: framesync.h:91
#define max(a, b)
Definition: cuda_runtime.h:33
uint32_t palette[AVPALETTE_COUNT]
Definition: vf_paletteuse.c:88
static const AVOption paletteuse_options[]
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
uint8_t hash[HASH_SIZE]
Definition: movenc.c:57
#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)
static int disp_tree(const struct color_node *node, const char *fname)
set_frame_func set_frame
Definition: vf_paletteuse.c:94
static int load_apply_palette(FFFrameSync *fs)
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
uint8_t max[3]
#define AV_BPRINT_SIZE_UNLIMITED
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:293
Frame sync structure.
Definition: framesync.h:146
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:114
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57
void * priv
private data for use by the filter
Definition: avfilter.h:353
static const set_frame_func set_frame_lut[NB_COLOR_SEARCHES][NB_DITHERING]
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter&#39;s input and try to produce output.
Definition: framesync.c:344
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
Definition: framesync.h:172
int opt_repeatlast
Definition: framesync.h:205
static int dither_value(int p)
static int debug_accuracy(const struct color_node *node, const uint32_t *palette, const int trans_thresh, const enum color_search_method search_method)
common internal API header
#define b
Definition: input.c:41
static int get_next_color(const uint8_t *color_used, const uint32_t *palette, const int trans_thresh, int *component, const struct color_rect *box)
static void disp_node(AVBPrint *buf, const struct color_node *map, int parent_id, int node_id, int depth)
#define width
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:439
static void set_processing_window(enum diff_mode diff_mode, const AVFrame *prv_src, const AVFrame *cur_src, const AVFrame *prv_dst, AVFrame *cur_dst, int *xp, int *yp, int *wp, int *hp)
uint8_t w
Definition: llviddspenc.c:38
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
#define NBITS
Definition: vf_paletteuse.c:65
static int apply_palette(AVFilterLink *inlink, AVFrame *in, AVFrame **outf)
uint8_t min[3]
#define s(width, name)
Definition: cbs_vp9.c:257
static int activate(AVFilterContext *ctx)
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
static int config_output(AVFilterLink *outlink)
if(ret)
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
Extend the frame to infinity.
Definition: framesync.h:75
static int cmp_pal_entry(const void *a, const void *b)
void * av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, const uint8_t *elem_data)
Add an element of size elem_size to a dynamic array.
Definition: mem.c:322
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static av_always_inline int color_get(PaletteUseContext *s, uint32_t color, uint8_t a, uint8_t r, uint8_t g, uint8_t b, const enum color_search_method search_method)
Check if the requested color is in the cache already.
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
AVFrame * last_out
Definition: vf_paletteuse.c:99
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
void * buf
Definition: avisynth_c.h:766
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
#define DITHERING_ENTRIES(color_search)
Filter definition.
Definition: avfilter.h:144
const char * name
Filter name.
Definition: avfilter.h:148
static void load_colormap(PaletteUseContext *s)
const VDPAUPixFmtMap * map
#define DECLARE_CMP_FUNC(name, pos)
#define OFFSET(x)
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
#define AVPALETTE_COUNT
Definition: pixfmt.h:33
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:611
#define FLAGS
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
int
uint8_t pal_entry
Definition: vf_paletteuse.c:70
struct cache_node cache[CACHE_SIZE]
Definition: vf_paletteuse.c:86
int(* cmp_func)(const void *, const void *)
struct cached_color * entries
Definition: vf_paletteuse.c:74
static const uint64_t c2
Definition: murmur3.c:50
static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in, int x_start, int y_start, int w, int h, enum dithering_mode dither, const enum color_search_method search_method)
static av_cold void uninit(AVFilterContext *ctx)
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
uint8_t palette_id
Definition: vf_paletteuse.c:60
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define av_always_inline
Definition: attributes.h:39
static const AVFilterPad paletteuse_inputs[]
static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
#define CACHE_SIZE
Definition: vf_paletteuse.c:66
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
struct color_node map[AVPALETTE_COUNT]
Definition: vf_paletteuse.c:87
float min
uint32_t value
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
Definition: qsort.h:33
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
int ordered_dither[8 *8]
Definition: vf_paletteuse.c:96
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1, const AVFrame *in2, int frame_count)