FFmpeg
pixlet.c
1 /*
2  * Apple Pixlet decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdint.h>
23 
24 #include "libavutil/intmath.h"
25 #include "libavutil/mem.h"
26 
27 #include "avcodec.h"
28 #include "bytestream.h"
29 #include "codec_internal.h"
30 #include "decode.h"
31 #include "get_bits.h"
32 #include "thread.h"
33 #include "unary.h"
34 
35 #define NB_LEVELS 4
36 
37 #define PIXLET_MAGIC 0xDEADBEEF
38 
39 #define H 0
40 #define V 1
41 
42 typedef struct SubBand {
43  unsigned width, height;
44  unsigned size;
45  unsigned x, y;
46 } SubBand;
47 
48 typedef struct PixletContext {
49  AVClass *class;
50 
51  GetByteContext gb;
52  GetBitContext bc;
53 
54  int levels;
55  int depth;
56  int w, h;
57 
58  int16_t *filter[2];
59  int16_t *prediction;
60  int64_t scaling[4][2][NB_LEVELS];
61  uint16_t lut[65536];
62  SubBand band[4][NB_LEVELS * 3 + 1];
63 } PixletContext;
64 
65 static av_cold int pixlet_init(AVCodecContext *avctx)
66 {
67  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
68  avctx->color_range = AVCOL_RANGE_JPEG;
69  return 0;
70 }
71 
72 static void free_buffers(AVCodecContext *avctx)
73 {
74  PixletContext *ctx = avctx->priv_data;
75 
76  av_freep(&ctx->filter[0]);
77  av_freep(&ctx->filter[1]);
78  av_freep(&ctx->prediction);
79 }
80 
81 static av_cold int pixlet_close(AVCodecContext *avctx)
82 {
83  PixletContext *ctx = avctx->priv_data;
84  free_buffers(avctx);
85  ctx->w = 0;
86  ctx->h = 0;
87  return 0;
88 }
89 
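/* Allocate the scratch buffers and compute the position and size of every
 * wavelet sub-band for the three planes (chroma is subsampled by 2 in both
 * directions). Band 0 is the lowpass band; the remaining 3 * NB_LEVELS
 * entries are the highpass bands, ordered from the coarsest level to the
 * finest, with x/y giving each band's offset within the plane. */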
90 static int init_decoder(AVCodecContext *avctx)
91 {
92  PixletContext *ctx = avctx->priv_data;
93  int i, plane;
94 
95  ctx->filter[0] = av_malloc_array(ctx->h, sizeof(int16_t));
96  ctx->filter[1] = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
97  ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
98  if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
99  return AVERROR(ENOMEM);
100 
101  for (plane = 0; plane < 3; plane++) {
102  unsigned shift = plane > 0;
103  unsigned w = ctx->w >> shift;
104  unsigned h = ctx->h >> shift;
105 
106  ctx->band[plane][0].width = w >> NB_LEVELS;
107  ctx->band[plane][0].height = h >> NB_LEVELS;
108  ctx->band[plane][0].size = (w >> NB_LEVELS) * (h >> NB_LEVELS);
109 
110  for (i = 0; i < NB_LEVELS * 3; i++) {
111  unsigned scale = ctx->levels - (i / 3);
112 
113  ctx->band[plane][i + 1].width = w >> scale;
114  ctx->band[plane][i + 1].height = h >> scale;
115  ctx->band[plane][i + 1].size = (w >> scale) * (h >> scale);
116 
117  ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
118  ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
119  }
120  }
121 
122  return 0;
123 }
124 
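/* Decode the lowpass band coefficients with an adaptive unary/escape code:
 * 'state' tracks recent magnitudes and selects how many raw bits follow the
 * unary prefix, and small states switch to a zero-run mode. Returns the
 * number of bytes consumed from the bitstream, or a negative error code. */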
125 static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size,
126  int width, ptrdiff_t stride)
127 {
128  PixletContext *ctx = avctx->priv_data;
129  GetBitContext *bc = &ctx->bc;
130  unsigned cnt1, nbits, k, j = 0, i = 0;
131  int64_t value, state = 3;
132  int rlen, escape, flag = 0;
133 
134  while (i < size) {
135  nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);
136 
137  cnt1 = get_unary(bc, 0, 8);
138  if (cnt1 < 8) {
139  value = show_bits(bc, nbits);
140  if (value <= 1) {
141  skip_bits(bc, nbits - 1);
142  escape = ((1 << nbits) - 1) * cnt1;
143  } else {
144  skip_bits(bc, nbits);
145  escape = value + ((1 << nbits) - 1) * cnt1 - 1;
146  }
147  } else {
148  escape = get_bits(bc, 16);
149  }
150 
151  value = -((escape + flag) & 1) | 1;
152  dst[j++] = value * ((escape + flag + 1) >> 1);
153  i++;
154  if (j == width) {
155  j = 0;
156  dst += stride;
157  }
158  state = 120 * (escape + flag) + state - (120 * state >> 8);
159  flag = 0;
160 
161  if (state * 4ULL > 0xFF || i >= size)
162  continue;
163 
164  nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
165  escape = av_zero_extend(16383, nbits);
166  cnt1 = get_unary(bc, 0, 8);
167  if (cnt1 > 7) {
168  rlen = get_bits(bc, 16);
169  } else {
170  value = show_bits(bc, nbits);
171  if (value > 1) {
172  skip_bits(bc, nbits);
173  rlen = value + escape * cnt1 - 1;
174  } else {
175  skip_bits(bc, nbits - 1);
176  rlen = escape * cnt1;
177  }
178  }
179 
180  if (rlen > size - i)
181  return AVERROR_INVALIDDATA;
182  i += rlen;
183 
184  for (k = 0; k < rlen; k++) {
185  dst[j++] = 0;
186  if (j == width) {
187  j = 0;
188  dst += stride;
189  }
190  }
191 
192  state = 0;
193  flag = rlen < 0xFFFF ? 1 : 0;
194  }
195 
196  align_get_bits(bc);
197  return get_bits_count(bc) >> 3;
198 }
199 
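/* Decode one highpass sub-band with a scheme similar to read_low_coeffs,
 * except that the symbol size is derived from the per-band parameter 'a',
 * decoded magnitudes are scaled by the per-band factor 'c', and the state
 * update is driven by 'd'. Returns bytes consumed or a negative error. */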
200 static int read_high_coeffs(AVCodecContext *avctx, const uint8_t *src, int16_t *dst,
201  int size, int c, int a, int d,
202  int width, ptrdiff_t stride)
203 {
204  PixletContext *ctx = avctx->priv_data;
205  GetBitContext *bc = &ctx->bc;
206  unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
207  int ret, escape, pfx, value, yflag, xflag, flag = 0;
208  int64_t state = 3, tmp;
209 
210  ret = init_get_bits8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
211  if (ret < 0)
212  return ret;
213 
214  if (a ^ (a >> 31)) {
215  nbits = 33 - ff_clz(a ^ (a >> 31));
216  if (nbits > 16)
217  return AVERROR_INVALIDDATA;
218  } else {
219  nbits = 1;
220  }
221 
222  length = 25 - nbits;
223 
224  while (i < size) {
225  if (((state >> 8) + 3) & 0xFFFFFFF)
226  value = ff_clz((state >> 8) + 3) ^ 0x1F;
227  else
228  value = -1;
229 
230  cnt1 = get_unary(bc, 0, length);
231  if (cnt1 >= length) {
232  cnt1 = get_bits(bc, nbits);
233  } else {
234  pfx = FFMIN(value, 14);
235  if (pfx < 1)
236  return AVERROR_INVALIDDATA;
237  cnt1 *= (1 << pfx) - 1;
238  shbits = show_bits(bc, pfx);
239  if (shbits <= 1) {
240  skip_bits(bc, pfx - 1);
241  } else {
242  skip_bits(bc, pfx);
243  cnt1 += shbits - 1;
244  }
245  }
246 
247  xflag = flag + cnt1;
248  yflag = xflag;
249 
250  if (flag + cnt1 == 0) {
251  value = 0;
252  } else {
253  xflag &= 1u;
254  tmp = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
255  value = xflag + (tmp ^ -xflag);
256  }
257 
258  i++;
259  dst[j++] = value;
260  if (j == width) {
261  j = 0;
262  dst += stride;
263  }
264  state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);
265 
266  flag = 0;
267 
268  if ((uint64_t)state > 0xFF / 4 || i >= size)
269  continue;
270 
271  pfx = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
272  escape = av_zero_extend(16383, pfx);
273  cnt1 = get_unary(bc, 0, 8);
274  if (cnt1 < 8) {
275  if (pfx < 1 || pfx > 25)
276  return AVERROR_INVALIDDATA;
277 
278  value = show_bits(bc, pfx);
279  if (value > 1) {
280  skip_bits(bc, pfx);
281  rlen = value + escape * cnt1 - 1;
282  } else {
283  skip_bits(bc, pfx - 1);
284  rlen = escape * cnt1;
285  }
286  } else {
287  if (get_bits1(bc))
288  value = get_bits(bc, 16);
289  else
290  value = get_bits(bc, 8);
291 
292  rlen = value + 8 * escape;
293  }
294 
295  if (rlen > 0xFFFF || i + rlen > size)
296  return AVERROR_INVALIDDATA;
297  i += rlen;
298 
299  for (k = 0; k < rlen; k++) {
300  dst[j++] = 0;
301  if (j == width) {
302  j = 0;
303  dst += stride;
304  }
305  }
306 
307  state = 0;
308  flag = rlen < 0xFFFF ? 1 : 0;
309  }
310 
311  align_get_bits(bc);
312  return get_bits_count(bc) >> 3;
313 }
314 
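/* For every highpass band of the given plane, read the band header (four
 * 32-bit parameters followed by a magic word) and decode its coefficients
 * directly into the frame buffer at the band's offset. */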
315 static int read_highpass(AVCodecContext *avctx, const uint8_t *ptr,
316  int plane, AVFrame *frame)
317 {
318  PixletContext *ctx = avctx->priv_data;
319  ptrdiff_t stride = frame->linesize[plane] / 2;
320  int i, ret;
321 
322  for (i = 0; i < ctx->levels * 3; i++) {
323  int32_t a = bytestream2_get_be32(&ctx->gb);
324  int32_t b = bytestream2_get_be32(&ctx->gb);
325  int32_t c = bytestream2_get_be32(&ctx->gb);
326  int32_t d = bytestream2_get_be32(&ctx->gb);
327  int16_t *dest = (int16_t *)frame->data[plane] +
328  ctx->band[plane][i + 1].x +
329  ctx->band[plane][i + 1].y * stride;
330  unsigned size = ctx->band[plane][i + 1].size;
331  uint32_t magic = bytestream2_get_be32(&ctx->gb);
332 
333  if (magic != PIXLET_MAGIC) {
334  av_log(avctx, AV_LOG_ERROR,
335  "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
336  magic, plane, i);
337  return AVERROR_INVALIDDATA;
338  }
339 
340  if (a == INT32_MIN)
341  return AVERROR_INVALIDDATA;
342 
343  ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
344  c, (b >= FFABS(a)) ? b : a, d,
345  ctx->band[plane][i + 1].width, stride);
346  if (ret < 0) {
347  av_log(avctx, AV_LOG_ERROR,
348  "error in highpass coefficients for plane %d, band %d\n",
349  plane, i);
350  return ret;
351  }
352  bytestream2_skip(&ctx->gb, ret);
353  }
354 
355  return 0;
356 }
357 
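/* Undo the prediction applied to the lowpass band: coefficients are stored
 * as differences and are integrated down each column via 'pred' and then
 * left-to-right along each row. */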
358 static void lowpass_prediction(int16_t *dst, int16_t *pred,
359  int width, int height, ptrdiff_t stride)
360 {
361  int16_t val;
362  int i, j;
363 
364  memset(pred, 0, width * sizeof(*pred));
365 
366  for (i = 0; i < height; i++) {
367  val = pred[0] + dst[0];
368  dst[0] = pred[0] = val;
369  for (j = 1; j < width; j++) {
370  val = pred[j] + dst[j];
371  dst[j] = pred[j] = val;
372  dst[j] += dst[j-1];
373  }
374  dst += stride;
375  }
376 }
377 
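/* Inverse wavelet filter for a single row or column of length 'size': the
 * low and high halves are copied into a padded temporary buffer, extended
 * past their edges, and recombined into even/odd output samples using
 * 32-bit fixed-point taps, scaled by the per-band 'scale' factor. */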
378 static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
379 {
380  int16_t *low, *high, *ll, *lh, *hl, *hh;
381  int hsize, i, j;
382  int64_t value;
383 
384  hsize = size >> 1;
385  low = tmp + 4;
386  high = &low[hsize + 8];
387 
388  memcpy(low, dest, size);
389  memcpy(high, dest + hsize, size);
390 
391  ll = &low[hsize];
392  lh = &low[hsize];
393  hl = &high[hsize];
394  hh = hl;
395  for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
396  low[i - 5] = low[j - 1];
397  lh[0] = ll[-1];
398  high[i - 5] = high[j - 2];
399  hh[0] = hl[-2];
400  }
401 
402  for (i = 0; i < hsize; i++) {
403  value = (int64_t) low [i + 1] * -INT64_C(325392907) +
404  (int64_t) low [i + 0] * INT64_C(3687786320) +
405  (int64_t) low [i - 1] * -INT64_C(325392907) +
406  (int64_t) high[i + 0] * INT64_C(1518500249) +
407  (int64_t) high[i - 1] * INT64_C(1518500249);
408  dest[i * 2] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
409  }
410 
411  for (i = 0; i < hsize; i++) {
412  value = (int64_t) low [i + 2] * -INT64_C(65078576) +
413  (int64_t) low [i + 1] * INT64_C(1583578880) +
414  (int64_t) low [i + 0] * INT64_C(1583578880) +
415  (int64_t) low [i - 1] * -INT64_C(65078576) +
416  (int64_t) high[i + 1] * INT64_C(303700064) +
417  (int64_t) high[i + 0] * -INT64_C(3644400640) +
418  (int64_t) high[i - 1] * INT64_C(303700064);
419  dest[i * 2 + 1] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
420  }
421 }
422 
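/* Inverse wavelet transform: starting from the lowpass resolution, each of
 * the NB_LEVELS iterations doubles the reconstructed area, filtering rows
 * first and then columns with the per-level scaling factors decoded from
 * the bitstream. */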
423 static void reconstruction(AVCodecContext *avctx, int16_t *dest,
424  unsigned width, unsigned height, ptrdiff_t stride,
425  int64_t *scaling_h, int64_t *scaling_v)
426 {
427  PixletContext *ctx = avctx->priv_data;
428  unsigned scaled_width, scaled_height;
429  int16_t *ptr, *tmp;
430  int i, j, k;
431 
432  scaled_width = width >> NB_LEVELS;
433  scaled_height = height >> NB_LEVELS;
434  tmp = ctx->filter[0];
435 
436  for (i = 0; i < NB_LEVELS; i++) {
437  int64_t scale_v = scaling_v[i];
438  int64_t scale_h = scaling_h[i];
439  scaled_width <<= 1;
440  scaled_height <<= 1;
441 
442  ptr = dest;
443  for (j = 0; j < scaled_height; j++) {
444  filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
445  ptr += stride;
446  }
447 
448  for (j = 0; j < scaled_width; j++) {
449  ptr = dest + j;
450  for (k = 0; k < scaled_height; k++) {
451  tmp[k] = *ptr;
452  ptr += stride;
453  }
454 
455  filterfn(tmp, ctx->filter[1], scaled_height, scale_h);
456 
457  ptr = dest + j;
458  for (k = 0; k < scaled_height; k++) {
459  *ptr = tmp[k];
460  ptr += stride;
461  }
462  }
463  }
464 }
465 
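/* Build the luma output lookup table: a quadratic mapping (i*i normalized
 * by the coded bit depth) to full-range 16-bit values, which roughly acts
 * as a gamma-style expansion. Only rebuilt when the depth changes. */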
466 static void build_luma_lut(AVCodecContext *avctx, int depth)
467 {
468  PixletContext *ctx = avctx->priv_data;
469  int max = (1 << depth) - 1;
470 
471  if (ctx->depth == depth)
472  return;
473  ctx->depth = depth;
474 
475  for (int i = 0; i < FF_ARRAY_ELEMS(ctx->lut); i++)
476  ctx->lut[i] = ((int64_t)i * i * 65535LL) / max / max;
477 }
478 
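/* Convert the luma plane in place from signed 16-bit wavelet output to
 * unsigned 16-bit pixels: negative values clamp to 0, values above the
 * coded bit depth clamp to 65535, everything else goes through the LUT. */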
479 static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
480  int w, int h, int depth)
481 {
482  PixletContext *ctx = avctx->priv_data;
483  uint16_t *dsty = (uint16_t *)frame->data[0];
484  int16_t *srcy = (int16_t *)frame->data[0];
485  ptrdiff_t stridey = frame->linesize[0] / 2;
486  uint16_t *lut = ctx->lut;
487  int i, j;
488 
489  for (j = 0; j < h; j++) {
490  for (i = 0; i < w; i++) {
491  if (srcy[i] <= 0)
492  dsty[i] = 0;
493  else if (srcy[i] > ((1 << depth) - 1))
494  dsty[i] = 65535;
495  else
496  dsty[i] = lut[srcy[i]];
497  }
498  dsty += stridey;
499  srcy += stridey;
500  }
501 }
502 
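/* Convert both chroma planes in place: add the mid-level bias for the coded
 * depth, clip to the valid range and shift up to 16-bit precision. */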
503 static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
504 {
505  uint16_t *dstu = (uint16_t *)frame->data[1];
506  uint16_t *dstv = (uint16_t *)frame->data[2];
507  int16_t *srcu = (int16_t *)frame->data[1];
508  int16_t *srcv = (int16_t *)frame->data[2];
509  ptrdiff_t strideu = frame->linesize[1] / 2;
510  ptrdiff_t stridev = frame->linesize[2] / 2;
511  const unsigned add = 1 << (depth - 1);
512  const unsigned shift = 16 - depth;
513  int i, j;
514 
515  for (j = 0; j < h; j++) {
516  for (i = 0; i < w; i++) {
517  dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
518  dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
519  }
520  dstu += strideu;
521  dstv += stridev;
522  srcu += strideu;
523  srcv += stridev;
524  }
525 }
526 
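/* Decode a single plane: read the per-level scaling factors and the lowpass
 * DC value, decode the lowpass coefficients (top row, left column, rest),
 * then the highpass bands, and finally apply lowpass prediction and the
 * inverse wavelet reconstruction. */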
527 static int decode_plane(AVCodecContext *avctx, int plane,
528  const AVPacket *avpkt, AVFrame *frame)
529 {
530  PixletContext *ctx = avctx->priv_data;
531  ptrdiff_t stride = frame->linesize[plane] / 2;
532  unsigned shift = plane > 0;
533  int16_t *dst;
534  int i, ret;
535 
536  for (i = ctx->levels - 1; i >= 0; i--) {
537  int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
538  int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
539 
540  if (!h || !v)
541  return AVERROR_INVALIDDATA;
542 
543  ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
544  ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
545  }
546 
547  bytestream2_skip(&ctx->gb, 4);
548 
549  dst = (int16_t *)frame->data[plane];
550  dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);
551 
552  ret = init_get_bits8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
553  bytestream2_get_bytes_left(&ctx->gb));
554  if (ret < 0)
555  return ret;
556 
557  ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
558  ctx->band[plane][0].width - 1, 0);
559  if (ret < 0) {
560  av_log(avctx, AV_LOG_ERROR,
561  "error in lowpass coefficients for plane %d, top row\n", plane);
562  return ret;
563  }
564 
565  ret = read_low_coeffs(avctx, dst + stride,
566  ctx->band[plane][0].height - 1, 1, stride);
567  if (ret < 0) {
568  av_log(avctx, AV_LOG_ERROR,
569  "error in lowpass coefficients for plane %d, left column\n",
570  plane);
571  return ret;
572  }
573 
574  ret = read_low_coeffs(avctx, dst + stride + 1,
575  (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
576  ctx->band[plane][0].width - 1, stride);
577  if (ret < 0) {
578  av_log(avctx, AV_LOG_ERROR,
579  "error in lowpass coefficients for plane %d, rest\n", plane);
580  return ret;
581  }
582 
583  bytestream2_skip(&ctx->gb, ret);
584  if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
585  av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
586  return AVERROR_INVALIDDATA;
587  }
588 
589  ret = read_highpass(avctx, avpkt->data, plane, frame);
590  if (ret < 0)
591  return ret;
592 
593  lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
594  ctx->band[plane][0].height, stride);
595 
596  reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
597  ctx->h >> shift, stride, ctx->scaling[plane][H],
598  ctx->scaling[plane][V]);
599 
600  return 0;
601 }
602 
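/* Top-level frame decoding: validate the packet header (size, version,
 * dimensions, level count, bit depth), reallocate buffers if the coded size
 * changed, decode the three planes (luma only with AV_CODEC_FLAG_GRAY) and
 * convert them to full-range 16-bit output. Returns the packet size. */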
603 static int pixlet_decode_frame(AVCodecContext *avctx, AVFrame *p,
604  int *got_frame, AVPacket *avpkt)
605 {
606  PixletContext *ctx = avctx->priv_data;
607  int i, w, h, width, height, ret, version;
608  uint32_t pktsize, depth;
609 
610  bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
611 
612  pktsize = bytestream2_get_be32(&ctx->gb);
613  if (pktsize <= 44 + (NB_LEVELS * 8 + 6) * 3 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
614  av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
615  return AVERROR_INVALIDDATA;
616  }
617 
618  version = bytestream2_get_le32(&ctx->gb);
619  if (version != 1)
620  avpriv_request_sample(avctx, "Version %d", version);
621 
622  bytestream2_skip(&ctx->gb, 4);
623  if (bytestream2_get_be32(&ctx->gb) != 1)
624  return AVERROR_INVALIDDATA;
625  bytestream2_skip(&ctx->gb, 4);
626 
627  width = bytestream2_get_be32(&ctx->gb);
628  height = bytestream2_get_be32(&ctx->gb);
629 
630  if ( width > INT_MAX - (1U << (NB_LEVELS + 1))
631  || height > INT_MAX - (1U << (NB_LEVELS + 1)))
632  return AVERROR_INVALIDDATA;
633 
634  w = FFALIGN(width, 1 << (NB_LEVELS + 1));
635  h = FFALIGN(height, 1 << (NB_LEVELS + 1));
636 
637  ctx->levels = bytestream2_get_be32(&ctx->gb);
638  if (ctx->levels != NB_LEVELS)
639  return AVERROR_INVALIDDATA;
640  depth = bytestream2_get_be32(&ctx->gb);
641  if (depth < 8 || depth > 15) {
642  avpriv_request_sample(avctx, "Depth %d", depth);
643  return AVERROR_INVALIDDATA;
644  }
645 
646  build_luma_lut(avctx, depth);
647 
648  ret = ff_set_dimensions(avctx, w, h);
649  if (ret < 0)
650  return ret;
651  avctx->width = width;
652  avctx->height = height;
653 
654  if (ctx->w != w || ctx->h != h) {
655  free_buffers(avctx);
656  ctx->w = w;
657  ctx->h = h;
658 
659  ret = init_decoder(avctx);
660  if (ret < 0) {
661  free_buffers(avctx);
662  ctx->w = 0;
663  ctx->h = 0;
664  return ret;
665  }
666  }
667 
668  bytestream2_skip(&ctx->gb, 8);
669 
670  p->color_range = AVCOL_RANGE_JPEG;
671 
672  ret = ff_thread_get_buffer(avctx, p, 0);
673  if (ret < 0)
674  return ret;
675 
676  for (i = 0; i < 3; i++) {
677  ret = decode_plane(avctx, i, avpkt, p);
678  if (ret < 0)
679  return ret;
680  if (avctx->flags & AV_CODEC_FLAG_GRAY)
681  break;
682  }
683 
684  postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
685  postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);
686 
687  *got_frame = 1;
688 
689  return pktsize;
690 }
691 
692 const FFCodec ff_pixlet_decoder = {
693  .p.name = "pixlet",
694  CODEC_LONG_NAME("Apple Pixlet"),
695  .p.type = AVMEDIA_TYPE_VIDEO,
696  .p.id = AV_CODEC_ID_PIXLET,
697  .init = pixlet_init,
698  .close = pixlet_close,
699  FF_CODEC_DECODE_CB(pixlet_decode_frame),
700  .priv_data_size = sizeof(PixletContext),
701  .p.capabilities = AV_CODEC_CAP_DR1 |
702  AV_CODEC_CAP_FRAME_THREADS,
703  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
704 };