vf_hqx.c
/*
 * Copyright (c) 2014 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file
 * hqx magnification filters (hq2x, hq3x, hq4x)
 *
 * Originally designed by Maxim Stephin.
 *
 * @see http://en.wikipedia.org/wiki/Hqx
 * @see http://web.archive.org/web/20131114143602/http://www.hiend3d.com/hq3x.html
 * @see http://blog.pkh.me/p/19-butchering-hqx-scaling-filters.html
 */
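
/*
 * Typical usage (for illustration): insert the filter in a libavfilter graph
 * as e.g. "hqx=n=2" (or simply "hqx=2") to double the input dimensions;
 * n may be 2, 3 or 4 and defaults to 3 (hq3x).
 */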

#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "internal.h"

typedef int (*hqxfunc_t)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);

typedef struct HQXContext {
    const AVClass *class;
    int n;
    hqxfunc_t func;
    uint32_t rgbtoyuv[1<<24];
} HQXContext;

typedef struct ThreadData {
    AVFrame *in, *out;
    const uint32_t *rgbtoyuv;
} ThreadData;

#define OFFSET(x) offsetof(HQXContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption hqx_options[] = {
    { "n", "set scale factor", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 3}, 2, 4, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(hqx);

static av_always_inline uint32_t rgb2yuv(const uint32_t *r2y, uint32_t c)
{
    return r2y[c & 0xffffff];
}

static av_always_inline int yuv_diff(uint32_t yuv1, uint32_t yuv2)
{
#define YMASK 0xff0000
#define UMASK 0x00ff00
#define VMASK 0x0000ff
#define ABSDIFF(a,b) (abs((int)(a)-(int)(b)))

    return ABSDIFF(yuv1 & YMASK, yuv2 & YMASK) > (48 << 16) ||
           ABSDIFF(yuv1 & UMASK, yuv2 & UMASK) > ( 7 <<  8) ||
           ABSDIFF(yuv1 & VMASK, yuv2 & VMASK) > ( 6 <<  0);
}
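
/*
 * rgbtoyuv[] maps a packed 0xRRGGBB value to a packed 0x00YYUUVV value (see
 * init() below); alpha is ignored by the comparison. yuv_diff() therefore
 * reports a "visible" difference whenever the two colors differ by more than
 * 48 in Y, 7 in U or 6 in V.
 */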

/* (c1*w1 + c2*w2) >> s */
static av_always_inline uint32_t interp_2px(uint32_t c1, int w1, uint32_t c2, int w2, int s)
{
    return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2) << (8 - s)) & 0xff00ff00) |
           (((((c1 & 0x00ff00ff)     ) * w1 + ((c2 & 0x00ff00ff)     ) * w2) >>      s ) & 0x00ff00ff);
}
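
/*
 * interp_2px() blends the A/G and R/B channel pairs in parallel inside a
 * 32-bit word. All call sites use weights with w1 + w2 == 1 << s, so per
 * channel this is simply (c1*w1 + c2*w2) >> s, e.g. interp_2px(a, 3, b, 1, 2)
 * is a 3:1 blend. A naive per-byte reference (illustrative only, not part of
 * the original filter) would be:
 */
#if 0
static uint32_t interp_2px_ref(uint32_t c1, int w1, uint32_t c2, int w2, int s)
{
    uint32_t out = 0;
    int i;

    for (i = 0; i < 4; i++) {
        const unsigned a = c1 >> (8 * i) & 0xff;
        const unsigned b = c2 >> (8 * i) & 0xff;
        out |= ((a * w1 + b * w2) >> s & 0xff) << (8 * i);
    }
    return out;
}
#endif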

/* (c1*w1 + c2*w2 + c3*w3) >> s */
static av_always_inline uint32_t interp_3px(uint32_t c1, int w1, uint32_t c2, int w2, uint32_t c3, int w3, int s)
{
    return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2 + ((c3 & 0xff00ff00) >> 8) * w3) << (8 - s)) & 0xff00ff00) |
           (((((c1 & 0x00ff00ff)     ) * w1 + ((c2 & 0x00ff00ff)     ) * w2 + ((c3 & 0x00ff00ff)     ) * w3) >>      s ) & 0x00ff00ff);
}
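
/*
 * As with interp_2px(), every call site uses weights that sum to 1 << s
 * (e.g. interp_3px(w4, 2, w0, 1, w1, 1, 2) is (2*w4 + w0 + w1) / 4 per
 * channel), so the per-channel sums never overflow into the neighbouring
 * channel.
 */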

/* m is the mask of diff with the center pixel that matters in the pattern, and
 * r is the expected result (bit set to 1 if there is difference with the
 * center, 0 otherwise) */
#define P(m, r) ((k_shuffled & (m)) == (r))
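
/*
 * Reading example: with the bit numbering used below (bits 0..7 standing for
 * neighbours 0,1,2,3,5,6,7,8 of the 3x3 window), P(0x0b,0x08) means "of the
 * top-left, top and left neighbours, only the left one differs from the
 * center". The test is done on k_shuffled, i.e. in the mirrored/rotated
 * frame set up by INTERP_BOOTSTRAP() below.
 */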

/* adjust 012345678 to 01235678: the mask doesn't contain the (null) diff
 * between the center/current pixel and itself */
#define DROP4(z) ((z) > 4 ? (z)-1 : (z))

/* shuffle the input mask: move bit n (4-adjusted) to position stored in p<n> */
#define SHF(x, rot, n) (((x) >> ((rot) ? 7-DROP4(n) : DROP4(n)) & 1) << DROP4(p##n))

/* used to check if there is YUV difference between 2 pixels */
#define WDIFF(c1, c2) yuv_diff(rgb2yuv(r2y, c1), rgb2yuv(r2y, c2))

/* bootstrap template for every interpolation code. It defines the shuffled
 * masks and surrounding pixels. The rot flag is used to indicate if it's a
 * rotation; its basic effect is to shuffle k using p8..p0 instead of p0..p8 */
#define INTERP_BOOTSTRAP(rot)                                           \
    const int k_shuffled = SHF(k,rot,0) | SHF(k,rot,1) | SHF(k,rot,2)   \
                         | SHF(k,rot,3) |       0      | SHF(k,rot,5)   \
                         | SHF(k,rot,6) | SHF(k,rot,7) | SHF(k,rot,8);  \
                                                                        \
    const uint32_t w0 = w[p0], w1 = w[p1],                              \
                   w3 = w[p3], w4 = w[p4], w5 = w[p5],                  \
                   w7 = w[p7]
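
/*
 * In other words: k is the 8-bit "differs from the center" mask computed in
 * hqx_filter() for the canonical orientation, and SHF() moves its bit
 * DROP4(n) to bit DROP4(p<n>) of k_shuffled (reading the source bits in
 * reverse order when rot is set). Together with the remapped w0..w7, this
 * lets the P() rule chains below be written once for the top-left case and
 * reused for the mirrored/rotated output pixels.
 */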

/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
 * top-left pixel of the 2x2 output block. The same function is also used for
 * the 3 other output pixels, with mirrored p0..p8 mappings. */
static av_always_inline uint32_t hq2x_interp_1x1(const uint32_t *r2y, int k,
                                                 const uint32_t *w,
                                                 int p0, int p1, int p2,
                                                 int p3, int p4, int p5,
                                                 int p6, int p7, int p8)
{
    INTERP_BOOTSTRAP(0);

    if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
        return interp_2px(w4, 3, w3, 1, 2);
    if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
        return interp_2px(w4, 3, w1, 1, 2);
    if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
        return w4;
    if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
         P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
         P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
         P(0xeb,0x8a)) && WDIFF(w3, w1))
        return interp_2px(w4, 3, w0, 1, 2);
    if (P(0x0b,0x08))
        return interp_3px(w4, 2, w0, 1, w1, 1, 2);
    if (P(0x0b,0x02))
        return interp_3px(w4, 2, w0, 1, w3, 1, 2);
    if (P(0x2f,0x2f))
        return interp_3px(w4, 14, w3, 1, w1, 1, 4);
    if (P(0xbf,0x37) || P(0xdb,0x13))
        return interp_3px(w4, 5, w1, 2, w3, 1, 3);
    if (P(0xdb,0x49) || P(0xef,0x6d))
        return interp_3px(w4, 5, w3, 2, w1, 1, 3);
    if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
        return interp_2px(w4, 3, w3, 1, 2);
    if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
        return interp_2px(w4, 3, w1, 1, 2);
    if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
        return interp_3px(w4, 2, w3, 3, w1, 3, 3);
    if (P(0xfb,0x6a) || P(0x6f,0x6e) || P(0x3f,0x3e) || P(0xfb,0xfa) ||
        P(0xdf,0xde) || P(0xdf,0x1e))
        return interp_2px(w4, 3, w0, 1, 2);
    if (P(0x0a,0x00) || P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
        P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) ||
        P(0x3b,0x1b))
        return interp_3px(w4, 2, w3, 1, w1, 1, 2);
    return interp_3px(w4, 6, w3, 1, w1, 1, 3);
}
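
/*
 * hq2x therefore produces its 2x2 output block with four calls to the
 * function above, differing only in the order of p0..p8 (see the hq2x calls
 * in hqx_filter() below); the final interp_3px(w4, 6, w3, 1, w1, 1, 3)
 * fallback is a (6,1,1)/8 blend biased towards the center pixel.
 */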

/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
 * top-left and top-center pixels of the 3x3 output block. The same function
 * is also used for the 3 other pairs of pixels on the outline. The center
 * pixel is not written here, since it is simply the original value. */
static av_always_inline void hq3x_interp_2x1(uint32_t *dst, int dst_linesize,
                                             const uint32_t *r2y, int k,
                                             const uint32_t *w,
                                             int pos00, int pos01,
                                             int p0, int p1, int p2,
                                             int p3, int p4, int p5,
                                             int p6, int p7, int p8,
                                             int rotate)
{
    INTERP_BOOTSTRAP(rotate);

    uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
    uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];

    if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
        *dst00 = interp_2px(w4, 3, w1, 1, 2);
    else if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
        *dst00 = interp_2px(w4, 3, w3, 1, 2);
    else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
        *dst00 = w4;
    else if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
              P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
              P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
              P(0xeb,0x8a)) && WDIFF(w3, w1))
        *dst00 = interp_2px(w4, 3, w0, 1, 2);
    else if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
        *dst00 = interp_2px(w4, 3, w1, 1, 2);
    else if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
        *dst00 = interp_2px(w4, 3, w3, 1, 2);
    else if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
        *dst00 = interp_2px(w3, 1, w1, 1, 1);
    else if (P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) || P(0xbe,0x0a) ||
             P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) || P(0x3b,0x1b))
        *dst00 = interp_3px(w4, 2, w3, 7, w1, 7, 4);
    else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) || P(0x6d,0x6c) ||
             P(0x67,0x66) || P(0x3d,0x3c) || P(0x37,0x36) || P(0xf9,0xf8) ||
             P(0xdd,0xdc) || P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
             P(0xd7,0x16) || P(0x0b,0x02))
        *dst00 = interp_2px(w4, 3, w0, 1, 2);
    else
        *dst00 = interp_3px(w4, 2, w3, 1, w1, 1, 2);

    if ((P(0xfe,0xde) || P(0x9e,0x16) || P(0xda,0x12) || P(0x17,0x16) ||
         P(0x5b,0x12) || P(0xbb,0x12)) && WDIFF(w1, w5))
        *dst01 = w4;
    else if ((P(0x0f,0x0b) || P(0x5e,0x0a) || P(0xfb,0x7b) || P(0x3b,0x0b) ||
              P(0xbe,0x0a) || P(0x7a,0x0a)) && WDIFF(w3, w1))
        *dst01 = w4;
    else if (P(0xbf,0x8f) || P(0x7e,0x0e) || P(0xbf,0x37) || P(0xdb,0x13))
        *dst01 = interp_2px(w1, 3, w4, 1, 2);
    else if (P(0x02,0x00) || P(0x7c,0x28) || P(0xed,0xa9) || P(0xf5,0xb4) ||
             P(0xd9,0x90))
        *dst01 = interp_2px(w4, 3, w1, 1, 2);
    else if (P(0x4f,0x4b) || P(0xfb,0x7b) || P(0xfe,0x7e) || P(0x9f,0x1b) ||
             P(0x2f,0x0b) || P(0xbe,0x0a) || P(0x7e,0x0a) || P(0xfb,0x4b) ||
             P(0xfb,0xdb) || P(0xfe,0xde) || P(0xfe,0x56) || P(0x57,0x56) ||
             P(0x97,0x16) || P(0x3f,0x1e) || P(0xdb,0x12) || P(0xbb,0x12))
        *dst01 = interp_2px(w4, 7, w1, 1, 3);
    else
        *dst01 = w4;
}
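
/*
 * pos00/pos01 encode the destination coordinates relative to dst: bit 1
 * selects the row and bit 0 the column. For instance the call with
 * pos00=2, pos01=0 in hqx_filter() writes the pixel one row below dst and
 * the pixel at dst itself (the "20 10" pair of the 3x3 output).
 */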

/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
 * top-left 2x2 block of the 4x4 output (which is made of 4 such blocks).
 * The same function is also used for the 3 other 2x2 blocks. */
static av_always_inline void hq4x_interp_2x2(uint32_t *dst, int dst_linesize,
                                             const uint32_t *r2y, int k,
                                             const uint32_t *w,
                                             int pos00, int pos01,
                                             int pos10, int pos11,
                                             int p0, int p1, int p2,
                                             int p3, int p4, int p5,
                                             int p6, int p7, int p8)
{
    INTERP_BOOTSTRAP(0);

    uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
    uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];
    uint32_t *dst10 = &dst[dst_linesize*(pos10>>1) + (pos10&1)];
    uint32_t *dst11 = &dst[dst_linesize*(pos11>>1) + (pos11&1)];

    const int cond00 = (P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5);
    const int cond01 = (P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3);
    const int cond02 = (P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) ||
                        P(0xdf,0x5a) || P(0x9f,0x8a) || P(0xcf,0x8a) ||
                        P(0xef,0x4e) || P(0x3f,0x0e) || P(0xfb,0x5a) ||
                        P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
                        P(0xeb,0x8a)) && WDIFF(w3, w1);
    const int cond03 = P(0xdb,0x49) || P(0xef,0x6d);
    const int cond04 = P(0xbf,0x37) || P(0xdb,0x13);
    const int cond05 = P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) ||
                       P(0x6b,0x43);
    const int cond06 = P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) ||
                       P(0x3b,0x19);
    const int cond07 = P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) ||
                       P(0x6d,0x6c) || P(0x67,0x66) || P(0x3d,0x3c) ||
                       P(0x37,0x36) || P(0xf9,0xf8) || P(0xdd,0xdc) ||
                       P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
                       P(0xd7,0x16) || P(0x0b,0x02);
    const int cond08 = (P(0x0f,0x0b) || P(0x2b,0x0b) || P(0xfe,0x4a) ||
                        P(0xfe,0x1a)) && WDIFF(w3, w1);
    const int cond09 = P(0x2f,0x2f);
    const int cond10 = P(0x0a,0x00);
    const int cond11 = P(0x0b,0x09);
    const int cond12 = P(0x7e,0x2a) || P(0xef,0xab);
    const int cond13 = P(0xbf,0x8f) || P(0x7e,0x0e);
    const int cond14 = P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
                       P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) ||
                       P(0xeb,0x4b) || P(0x3b,0x1b);
    const int cond15 = P(0x0b,0x03);

    if (cond00)
        *dst00 = interp_2px(w4, 5, w3, 3, 3);
    else if (cond01)
        *dst00 = interp_2px(w4, 5, w1, 3, 3);
    else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
        *dst00 = w4;
    else if (cond02)
        *dst00 = interp_2px(w4, 5, w0, 3, 3);
    else if (cond03)
        *dst00 = interp_2px(w4, 3, w3, 1, 2);
    else if (cond04)
        *dst00 = interp_2px(w4, 3, w1, 1, 2);
    else if (cond05)
        *dst00 = interp_2px(w4, 5, w3, 3, 3);
    else if (cond06)
        *dst00 = interp_2px(w4, 5, w1, 3, 3);
    else if (P(0x0f,0x0b) || P(0x5e,0x0a) || P(0x2b,0x0b) || P(0xbe,0x0a) ||
             P(0x7a,0x0a) || P(0xee,0x0a))
        *dst00 = interp_2px(w1, 1, w3, 1, 1);
    else if (cond07)
        *dst00 = interp_2px(w4, 5, w0, 3, 3);
    else
        *dst00 = interp_3px(w4, 2, w1, 1, w3, 1, 2);

    if (cond00)
        *dst01 = interp_2px(w4, 7, w3, 1, 3);
    else if (cond08)
        *dst01 = w4;
    else if (cond02)
        *dst01 = interp_2px(w4, 3, w0, 1, 2);
    else if (cond09)
        *dst01 = w4;
    else if (cond10)
        *dst01 = interp_3px(w4, 5, w1, 2, w3, 1, 3);
    else if (P(0x0b,0x08))
        *dst01 = interp_3px(w4, 5, w1, 2, w0, 1, 3);
    else if (cond11)
        *dst01 = interp_2px(w4, 5, w1, 3, 3);
    else if (cond04)
        *dst01 = interp_2px(w1, 3, w4, 1, 2);
    else if (cond12)
        *dst01 = interp_3px(w1, 2, w4, 1, w3, 1, 2);
    else if (cond13)
        *dst01 = interp_2px(w1, 5, w3, 3, 3);
    else if (cond05)
        *dst01 = interp_2px(w4, 7, w3, 1, 3);
    else if (P(0xf3,0x62) || P(0x67,0x66) || P(0x37,0x36) || P(0xf3,0xf2) ||
             P(0xd7,0xd6) || P(0xd7,0x16) || P(0x0b,0x02))
        *dst01 = interp_2px(w4, 3, w0, 1, 2);
    else if (cond14)
        *dst01 = interp_2px(w1, 1, w4, 1, 1);
    else
        *dst01 = interp_2px(w4, 3, w1, 1, 2);

    if (cond01)
        *dst10 = interp_2px(w4, 7, w1, 1, 3);
    else if (cond08)
        *dst10 = w4;
    else if (cond02)
        *dst10 = interp_2px(w4, 3, w0, 1, 2);
    else if (cond09)
        *dst10 = w4;
    else if (cond10)
        *dst10 = interp_3px(w4, 5, w3, 2, w1, 1, 3);
    else if (P(0x0b,0x02))
        *dst10 = interp_3px(w4, 5, w3, 2, w0, 1, 3);
    else if (cond15)
        *dst10 = interp_2px(w4, 5, w3, 3, 3);
    else if (cond03)
        *dst10 = interp_2px(w3, 3, w4, 1, 2);
    else if (cond13)
        *dst10 = interp_3px(w3, 2, w4, 1, w1, 1, 2);
    else if (cond12)
        *dst10 = interp_2px(w3, 5, w1, 3, 3);
    else if (cond06)
        *dst10 = interp_2px(w4, 7, w1, 1, 3);
    else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0x6d,0x6c) || P(0x3d,0x3c) ||
             P(0xf9,0xf8) || P(0xdd,0xdc) || P(0xdd,0x1c))
        *dst10 = interp_2px(w4, 3, w0, 1, 2);
    else if (cond14)
        *dst10 = interp_2px(w3, 1, w4, 1, 1);
    else
        *dst10 = interp_2px(w4, 3, w3, 1, 2);

    if ((P(0x7f,0x2b) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7f,0x0f)) &&
        WDIFF(w3, w1))
        *dst11 = w4;
    else if (cond02)
        *dst11 = interp_2px(w4, 7, w0, 1, 3);
    else if (cond15)
        *dst11 = interp_2px(w4, 7, w3, 1, 3);
    else if (cond11)
        *dst11 = interp_2px(w4, 7, w1, 1, 3);
    else if (P(0x0a,0x00) || P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) ||
             P(0x7e,0x0e))
        *dst11 = interp_3px(w4, 6, w3, 1, w1, 1, 3);
    else if (cond07)
        *dst11 = interp_2px(w4, 7, w0, 1, 3);
    else
        *dst11 = w4;
}

static av_always_inline void hqx_filter(const ThreadData *td, int jobnr, int nb_jobs, int n)
{
    int x, y;
    AVFrame *in = td->in, *out = td->out;
    const uint32_t *r2y = td->rgbtoyuv;
    const int height = in->height;
    const int width  = in->width;
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const int dst_linesize = out->linesize[0];
    const int src_linesize =  in->linesize[0];
    uint8_t       *dst = out->data[0] + slice_start * dst_linesize * n;
    const uint8_t *src =  in->data[0] + slice_start * src_linesize;

    const int dst32_linesize = dst_linesize >> 2;
    const int src32_linesize = src_linesize >> 2;

    for (y = slice_start; y < slice_end; y++) {
        const uint32_t *src32 = (const uint32_t *)src;
        uint32_t       *dst32 = (uint32_t *)dst;
        const int prevline = y > 0          ? -src32_linesize : 0;
        const int nextline = y < height - 1 ?  src32_linesize : 0;

        for (x = 0; x < width; x++) {
            const int prevcol = x > 0         ? -1 : 0;
            const int nextcol = x < width - 1 ?  1 : 0;
            const uint32_t w[3*3] = {
                src32[prevcol + prevline], src32[prevline], src32[prevline + nextcol],
                src32[prevcol           ], src32[       0], src32[           nextcol],
                src32[prevcol + nextline], src32[nextline], src32[nextline + nextcol]
            };
            const uint32_t yuv1 = rgb2yuv(r2y, w[4]);
            const int pattern = (w[4] != w[0] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[0]))) : 0)
                              | (w[4] != w[1] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[1]))) : 0) << 1
                              | (w[4] != w[2] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[2]))) : 0) << 2
                              | (w[4] != w[3] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[3]))) : 0) << 3
                              | (w[4] != w[5] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[5]))) : 0) << 4
                              | (w[4] != w[6] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[6]))) : 0) << 5
                              | (w[4] != w[7] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[7]))) : 0) << 6
                              | (w[4] != w[8] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[8]))) : 0) << 7;
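
            /* pattern bit i is set when the i-th neighbour (in the order
             * top-left, top, top-right, left, right, bottom-left, bottom,
             * bottom-right, i.e. w[0..3] and w[5..8]) is visibly different
             * from the center pixel w[4]; identical 32-bit values skip the
             * LUT lookups entirely. */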

            if (n == 2) {
                dst32[dst32_linesize*0 + 0] = hq2x_interp_1x1(r2y, pattern, w, 0,1,2,3,4,5,6,7,8);  // 00
                dst32[dst32_linesize*0 + 1] = hq2x_interp_1x1(r2y, pattern, w, 2,1,0,5,4,3,8,7,6);  // 01 (vert mirrored)
                dst32[dst32_linesize*1 + 0] = hq2x_interp_1x1(r2y, pattern, w, 6,7,8,3,4,5,0,1,2);  // 10 (horiz mirrored)
                dst32[dst32_linesize*1 + 1] = hq2x_interp_1x1(r2y, pattern, w, 8,7,6,5,4,3,2,1,0);  // 11 (center mirrored)
            } else if (n == 3) {
                hq3x_interp_2x1(dst32,                          dst32_linesize, r2y, pattern, w, 0,1, 0,1,2,3,4,5,6,7,8, 0);  // 00 01
                hq3x_interp_2x1(dst32 + 1,                      dst32_linesize, r2y, pattern, w, 1,3, 2,5,8,1,4,7,0,3,6, 1);  // 02 12 (rotated to the right)
                hq3x_interp_2x1(dst32 + 1*dst32_linesize,       dst32_linesize, r2y, pattern, w, 2,0, 6,3,0,7,4,1,8,5,2, 1);  // 20 10 (rotated to the left)
                hq3x_interp_2x1(dst32 + 1*dst32_linesize + 1,   dst32_linesize, r2y, pattern, w, 3,2, 8,7,6,5,4,3,2,1,0, 0);  // 22 21 (center mirrored)
                dst32[dst32_linesize + 1] = w[4];                                                                             // 11
            } else if (n == 4) {
                hq4x_interp_2x2(dst32,                          dst32_linesize, r2y, pattern, w, 0,1,2,3, 0,1,2,3,4,5,6,7,8);  // 00 01 10 11
                hq4x_interp_2x2(dst32 + 2,                      dst32_linesize, r2y, pattern, w, 1,0,3,2, 2,1,0,5,4,3,8,7,6);  // 02 03 12 13 (vert mirrored)
                hq4x_interp_2x2(dst32 + 2*dst32_linesize,       dst32_linesize, r2y, pattern, w, 2,3,0,1, 6,7,8,3,4,5,0,1,2);  // 20 21 30 31 (horiz mirrored)
                hq4x_interp_2x2(dst32 + 2*dst32_linesize + 2,   dst32_linesize, r2y, pattern, w, 3,2,1,0, 8,7,6,5,4,3,2,1,0);  // 22 23 32 33 (center mirrored)
            } else {
                av_assert0(0);
            }

            src32 += 1;
            dst32 += n;
        }

        src += src_linesize;
        dst += dst_linesize * n;
    }
}

#define HQX_FUNC(size) \
static int hq##size##x(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    hqx_filter(arg, jobnr, nb_jobs, size); \
    return 0; \
}

HQX_FUNC(2)
HQX_FUNC(3)
HQX_FUNC(4)

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
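
/*
 * AV_PIX_FMT_RGB32 is the endian-native packed 32-bit RGB format (with an
 * alpha byte), which is what allows every pixel to be handled as a plain
 * uint32_t in the code above.
 */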

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HQXContext *hqx = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    outlink->w = inlink->w * hqx->n;
    outlink->h = inlink->h * hqx->n;
    av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
           av_get_pix_fmt_name(inlink->format),
           inlink->w, inlink->h, outlink->w, outlink->h);
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    HQXContext *hqx = ctx->priv;
    ThreadData td;
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    td.in = in;
    td.out = out;
    td.rgbtoyuv = hqx->rgbtoyuv;
    ctx->internal->execute(ctx, hqx->func, &td, NULL, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold int init(AVFilterContext *ctx)
{
    HQXContext *hqx = ctx->priv;
    static const hqxfunc_t hqxfuncs[] = {hq2x, hq3x, hq4x};

    uint32_t c;
    int bg, rg, g;

    for (bg=-255; bg<256; bg++) {
        for (rg=-255; rg<256; rg++) {
            const uint32_t u = (uint32_t)((-169*rg + 500*bg)/1000) + 128;
            const uint32_t v = (uint32_t)(( 500*rg -  81*bg)/1000) + 128;
            int startg = FFMAX3(-bg, -rg, 0);
            int endg   = FFMIN3(255-bg, 255-rg, 255);
            uint32_t y = (uint32_t)(( 299*rg + 1000*startg + 114*bg)/1000);
            c = bg + rg * (1 << 16) + 0x010101 * startg;
            for (g = startg; g <= endg; g++) {
                hqx->rgbtoyuv[c] = ((y++) << 16) + (u << 8) + v;
                c += 0x010101;
            }
        }
    }

    hqx->func = hqxfuncs[hqx->n - 2];
    return 0;
}
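
/*
 * The nested loops above walk the RGB cube along its gray diagonal: for a
 * fixed (r-g, b-g) pair, U and V are constant and Y increases by exactly one
 * per step, so the whole 2^24-entry table is filled without recomputing the
 * divisions for every entry. Expanded per pixel, it is equivalent to the
 * integer BT.601-style formula below (illustrative only, not part of the
 * original filter):
 */
#if 0
static uint32_t rgb2yuv_ref(uint32_t rgb)
{
    const int r = rgb >> 16 & 0xff;
    const int g = rgb >>  8 & 0xff;
    const int b = rgb       & 0xff;
    const uint32_t y = ( 299*r + 587*g + 114*b) / 1000;
    const uint32_t u = (-169*r - 331*g + 500*b) / 1000 + 128;
    const uint32_t v = ( 500*r - 419*g -  81*b) / 1000 + 128;

    return y << 16 | u << 8 | v;
}
#endif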

static const AVFilterPad hqx_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad hqx_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_hqx = {
    .name          = "hqx",
    .description   = NULL_IF_CONFIG_SMALL("Scale the input by 2, 3 or 4 using the hq*x magnification algorithm."),
    .priv_size     = sizeof(HQXContext),
    .init          = init,
    .query_formats = query_formats,
    .inputs        = hqx_inputs,
    .outputs       = hqx_outputs,
    .priv_class    = &hqx_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};