FFmpeg
magicyuv.c
Go to the documentation of this file.
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/pixdesc.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "get_bits.h"
32 #include "huffyuvdsp.h"
33 #include "internal.h"
34 #include "lossless_videodsp.h"
35 #include "thread.h"
36 
/* Byte range of one coded slice's payload inside the input packet. */
typedef struct Slice {
    uint32_t start; /* offset from AVPacket->data (header already included) */
    uint32_t size;  /* payload size in bytes */
} Slice;
41 
/*
 * Spatial prediction mode, read from the second header byte of each
 * plane's slice payload.  GRADIENT and MEDIAN were missing from this
 * definition although both slice decoders reference them; restored with
 * their implicit canonical values.
 */
typedef enum Prediction {
    LEFT = 1,
    GRADIENT, /* = 2 */
    MEDIAN,   /* = 3 */
} Prediction;
47 
/*
 * One symbol of a canonical Huffman table.  The 'len' field had been lost
 * from this view; it is required by huff_build() (he[i].len) and
 * build_huffman() (he[j].len = x).
 */
typedef struct HuffEntry {
    uint8_t  len;  /* code length in bits (1..32) */
    uint16_t code; /* canonical code value, assigned by huff_build() */
} HuffEntry;
52 
53 typedef struct MagicYUVContext {
55  int max;
56  int bps;
58  int nb_slices;
59  int planes; // number of encoded planes in bitstream
60  int decorrelate; // postprocessing work
61  int color_matrix; // video color matrix
62  int flags;
63  int interlaced; // video is interlaced
64  const uint8_t *buf; // pointer to AVPacket->data
65  int hshift[4];
66  int vshift[4];
67  Slice *slices[4]; // slice bitstream positions for each plane
68  unsigned int slices_size[4]; // slice sizes for each plane
69  VLC vlc[4]; // VLC for each plane
70  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
71  int j, int threadnr);
74 
/**
 * Build a VLC for one plane from canonical Huffman code lengths.
 *
 * On entry, codes_count[i] holds the number of symbols with code length i
 * and he[].len is filled in; on return he[].code holds each symbol's
 * canonical code.  Note that codes_count[] is consumed (overwritten) in
 * the process.
 *
 * @param he          symbol table with .len pre-filled
 * @param codes_count per-length symbol counts, indices 1..32
 * @param vlc         output VLC
 * @param nb_elems    number of symbols
 * @return 0 on success, a negative value on failure (from init_vlc)
 */
static int huff_build(HuffEntry he[], uint16_t codes_count[33],
                      VLC *vlc, int nb_elems)
{
    unsigned nb_codes = 0, max = 0;

    /* Walk lengths from longest to shortest, turning the leaf counts into
     * the first canonical code value at each length, and record the
     * longest length actually used. */
    for (int i = 32; i > 0; i--) {
        uint16_t curr = codes_count[i]; // # of leafs of length i
        codes_count[i] = nb_codes / 2; // # of non-leaf nodes on level i
        nb_codes = codes_count[i] + curr; // # of nodes on level i
        if (curr && !max)
            max = i;
    }

    /* Assign codes in symbol order: each symbol takes the next free code
     * of its length. */
    for (unsigned i = 0; i < nb_elems; i++) {
        he[i].code = codes_count[he[i].len];
        codes_count[he[i].len]++;
    }
    /* Cap the first-stage table at 12 bits to bound memory use. */
    return init_vlc(vlc, FFMIN(max, 12), nb_elems,
                    &he[0].len, sizeof(he[0]), sizeof(he[0].len),
                    &he[0].code, sizeof(he[0]), sizeof(he[0].code), 0);
}
96 
/**
 * Reverse median prediction for one row of 16-bit samples.
 *
 * For each sample, predict from the left neighbour, the sample above
 * (src1[i]) and the top-left neighbour, add the coded difference and mask
 * to the sample bit depth.
 *
 * @param dst      output row (may alias diff)
 * @param src1     row above the current one
 * @param diff     coded residuals
 * @param w        row width in samples
 * @param left     in/out: left neighbour carried across calls
 * @param left_top in/out: top-left neighbour carried across calls
 * @param max      sample mask, e.g. 0x3FF for 10-bit content
 */
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    uint16_t cur  = *left;     /* running left neighbour */
    uint16_t diag = *left_top; /* top-left neighbour of current sample */
    intptr_t i = 0;

    while (i < w) {
        cur = mid_pred(cur, src1[i], (cur + src1[i] - diag)) + diff[i];
        cur &= max; /* wrap to the sample bit depth */
        diag = src1[i];
        dst[i] = cur;
        i++;
    }

    *left = cur;
    *left_top = diag;
}
117 
/**
 * Decode one horizontal slice (index j) of a 10/12-bit MagicYUV frame.
 *
 * Runs as a slice-threading job.  Each plane's slice payload starts with
 * two bytes (flags, predictor id) followed either by raw bps-wide samples
 * (flags & 1) or by Huffman-coded residuals, which are then integrated
 * with the LEFT/GRADIENT/MEDIAN predictor in place.
 *
 * @return 0 on success, a negative AVERROR code on invalid/truncated data
 */
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
                               int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    const int bps = s->bps;
    const int max = s->max - 1; /* sample mask, e.g. 0x3FF for 10 bps */
    AVFrame *p = s->p;
    int i, k, x;
    GetBitContext gb;
    uint16_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        /* The last slice of the frame may be shorter; chroma planes are
         * additionally shifted down by vshift. */
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        /* When interlaced, prediction references the line two rows up
         * (same field), hence the doubled stride; strides are in
         * uint16_t units (linesize / 2). */
        ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i] / 2;
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred = get_bits(&gb, 8);

        dst = (uint16_t *)p->data[i] + j * sheight * stride;
        if (flags & 1) {
            /* Uncompressed: bps bits per sample, read directly. */
            if (get_bits_left(&gb) < bps * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, bps);

                dst += stride;
            }
        } else {
            /* Huffman-coded residuals, one VLC per plane. */
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        /* Undo the spatial prediction in place.  In every mode the first
         * line (two lines when interlaced) has only a left neighbour. */
        switch (pred) {
        case LEFT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            /* Remaining lines: seed the left predictor from the sample
             * directly above (same field). */
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            /* predictor = left + top - topleft, masked to bit depth */
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left & max;
                for (x = 1; x < width; x++) {
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left & max;
                }
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    /* Undo green-based decorrelation for RGB content: B += G, R += G,
     * modulo the bit depth.  NOTE(review): the pointer names here use
     * plane indices 0/1/2 while the per-row increments use linesize[0..2]
     * in the opposite order; this is harmless only when all three
     * linesizes are equal — TODO confirm against upstream. */
    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
        uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
        uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;

        for (i = 0; i < height; i++) {
            for (k = 0; k < width; k++) {
                b[k] = (b[k] + g[k]) & max;
                r[k] = (r[k] + g[k]) & max;
            }
            b += p->linesize[0] / 2;
            g += p->linesize[1] / 2;
            r += p->linesize[2] / 2;
        }
    }

    return 0;
}
249 
/**
 * Decode one horizontal slice (index j) of an 8-bit MagicYUV frame.
 *
 * Runs as a slice-threading job.  Each plane's slice payload starts with
 * two bytes (flags, predictor id) followed either by raw bytes (flags & 1)
 * or by Huffman-coded residuals, which are then integrated with the
 * LEFT/GRADIENT/MEDIAN predictor in place.
 *
 * @return 0 on success, a negative AVERROR code on invalid/truncated data
 */
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        /* The last slice of the frame may be shorter; chroma planes are
         * additionally shifted down by vshift. */
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        /* When interlaced, prediction references the line two rows up
         * (same field), hence the doubled stride. */
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        const uint8_t *slice = s->buf + s->slices[i][j].start;
        int flags, pred;

        flags = bytestream_get_byte(&slice);
        pred = bytestream_get_byte(&slice);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            /* Uncompressed: copy rows byte for byte.  The payload must
             * cover width*height samples after the 2-byte header. */
            if (s->slices[i][j].size - 2 < width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                bytestream_get_buffer(&slice, dst, width);
                dst += stride;
            }
        } else {
            /* Huffman-coded residuals, one VLC per plane. */
            int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);

            if (ret < 0)
                return ret;

            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        /* Undo the spatial prediction in place.  In every mode the first
         * line (two lines when interlaced) has only a left neighbour. */
        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            /* Remaining lines: seed the left predictor from the sample
             * directly above (same field). */
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            /* First up-to-32 pixels are integrated scalar; the DSP routine
             * then handles the rest of the row. */
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    /* Undo green-based decorrelation for RGB content: B += G, R += G
     * (byte arithmetic wraps mod 256 inside add_bytes).  Plane 0/1 are
     * swapped afterwards in magy_decode_frame(). */
    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}
380 
/**
 * Parse the run-length coded table of Huffman code lengths from the frame
 * header and build one VLC per coded plane.
 *
 * Table syntax: each byte holds a code length in its low 7 bits; if the
 * high bit is set, an extra byte follows with an additional repeat count,
 * giving a run of (1 + count) symbols of that length.  A plane's table is
 * complete once exactly 'max' lengths have been collected.
 *
 * @param avctx      codec context (logging, s->planes, s->vlc)
 * @param table      serialized length table
 * @param table_size number of bytes available at table
 * @param max        symbols per plane (1 << bps)
 * @return 0 on success, AVERROR_INVALIDDATA on malformed/short tables
 */
static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
                         int table_size, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    HuffEntry he[4096];
    uint16_t length_count[33] = { 0 };
    int i = 0, j = 0, k;

    bytestream2_init(&gb, table, table_size);

    while (bytestream2_get_bytes_left(&gb) > 0) {
        int b = bytestream2_peek_byteu(&gb) & 0x80; /* run-extension flag */
        int x = bytestream2_get_byteu(&gb) & ~0x80; /* code length */
        int l = 1;                                  /* run length */

        if (b) {
            if (bytestream2_get_bytes_left(&gb) <= 0)
                break;
            l += bytestream2_get_byteu(&gb);
        }
        k = j + l;
        /* Reject runs overflowing the symbol count and invalid lengths. */
        if (k > max || x == 0 || x > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }

        length_count[x] += l;
        for (; j < k; j++)
            he[j].len = x;

        /* A full set of 'max' lengths completes one plane's table. */
        if (j == max) {
            j = 0;
            if (huff_build(he, length_count, &s->vlc[i], max)) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
            memset(length_count, 0, sizeof(length_count));
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
433 
434 static int magy_decode_frame(AVCodecContext *avctx, void *data,
435  int *got_frame, AVPacket *avpkt)
436 {
437  MagicYUVContext *s = avctx->priv_data;
438  ThreadFrame frame = { .f = data };
439  AVFrame *p = data;
440  GetByteContext gb;
441  uint32_t first_offset, offset, next_offset, header_size, slice_width;
442  int width, height, format, version, table_size;
443  int ret, i, j;
444 
445  if (avpkt->size < 36)
446  return AVERROR_INVALIDDATA;
447 
448  bytestream2_init(&gb, avpkt->data, avpkt->size);
449  if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
450  return AVERROR_INVALIDDATA;
451 
452  header_size = bytestream2_get_le32u(&gb);
453  if (header_size < 32 || header_size >= avpkt->size) {
454  av_log(avctx, AV_LOG_ERROR,
455  "header or packet too small %"PRIu32"\n", header_size);
456  return AVERROR_INVALIDDATA;
457  }
458 
459  version = bytestream2_get_byteu(&gb);
460  if (version != 7) {
461  avpriv_request_sample(avctx, "Version %d", version);
462  return AVERROR_PATCHWELCOME;
463  }
464 
465  s->hshift[1] =
466  s->vshift[1] =
467  s->hshift[2] =
468  s->vshift[2] = 0;
469  s->decorrelate = 0;
470  s->bps = 8;
471 
472  format = bytestream2_get_byteu(&gb);
473  switch (format) {
474  case 0x65:
475  avctx->pix_fmt = AV_PIX_FMT_GBRP;
476  s->decorrelate = 1;
477  break;
478  case 0x66:
479  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
480  s->decorrelate = 1;
481  break;
482  case 0x67:
483  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
484  break;
485  case 0x68:
486  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
487  s->hshift[1] =
488  s->hshift[2] = 1;
489  break;
490  case 0x69:
491  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
492  s->hshift[1] =
493  s->vshift[1] =
494  s->hshift[2] =
495  s->vshift[2] = 1;
496  break;
497  case 0x6a:
498  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
499  break;
500  case 0x6b:
501  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
502  break;
503  case 0x6c:
504  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
505  s->hshift[1] =
506  s->hshift[2] = 1;
507  s->bps = 10;
508  break;
509  case 0x76:
510  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
511  s->bps = 10;
512  break;
513  case 0x6d:
514  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
515  s->decorrelate = 1;
516  s->bps = 10;
517  break;
518  case 0x6e:
519  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
520  s->decorrelate = 1;
521  s->bps = 10;
522  break;
523  case 0x6f:
524  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
525  s->decorrelate = 1;
526  s->bps = 12;
527  break;
528  case 0x70:
529  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
530  s->decorrelate = 1;
531  s->bps = 12;
532  break;
533  case 0x73:
534  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
535  s->bps = 10;
536  break;
537  case 0x7b:
538  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
539  s->hshift[1] =
540  s->vshift[1] =
541  s->hshift[2] =
542  s->vshift[2] = 1;
543  s->bps = 10;
544  break;
545  default:
546  avpriv_request_sample(avctx, "Format 0x%X", format);
547  return AVERROR_PATCHWELCOME;
548  }
549  s->max = 1 << s->bps;
552 
553  bytestream2_skipu(&gb, 1);
554  s->color_matrix = bytestream2_get_byteu(&gb);
555  s->flags = bytestream2_get_byteu(&gb);
556  s->interlaced = !!(s->flags & 2);
557  bytestream2_skipu(&gb, 3);
558 
559  width = bytestream2_get_le32u(&gb);
560  height = bytestream2_get_le32u(&gb);
561  ret = ff_set_dimensions(avctx, width, height);
562  if (ret < 0)
563  return ret;
564 
565  slice_width = bytestream2_get_le32u(&gb);
566  if (slice_width != avctx->coded_width) {
567  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
568  return AVERROR_PATCHWELCOME;
569  }
570  s->slice_height = bytestream2_get_le32u(&gb);
571  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
572  av_log(avctx, AV_LOG_ERROR,
573  "invalid slice height: %d\n", s->slice_height);
574  return AVERROR_INVALIDDATA;
575  }
576 
577  bytestream2_skipu(&gb, 4);
578 
579  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
580  if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
581  av_log(avctx, AV_LOG_ERROR,
582  "invalid number of slices: %d\n", s->nb_slices);
583  return AVERROR_INVALIDDATA;
584  }
585 
586  if (s->interlaced) {
587  if ((s->slice_height >> s->vshift[1]) < 2) {
588  av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
589  return AVERROR_INVALIDDATA;
590  }
591  if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
592  av_log(avctx, AV_LOG_ERROR, "impossible height\n");
593  return AVERROR_INVALIDDATA;
594  }
595  }
596 
597  if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
598  return AVERROR_INVALIDDATA;
599  for (i = 0; i < s->planes; i++) {
600  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
601  if (!s->slices[i])
602  return AVERROR(ENOMEM);
603 
604  offset = bytestream2_get_le32u(&gb);
605  if (offset >= avpkt->size - header_size)
606  return AVERROR_INVALIDDATA;
607 
608  if (i == 0)
609  first_offset = offset;
610 
611  for (j = 0; j < s->nb_slices - 1; j++) {
612  s->slices[i][j].start = offset + header_size;
613 
614  next_offset = bytestream2_get_le32u(&gb);
615  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
616  return AVERROR_INVALIDDATA;
617 
618  s->slices[i][j].size = next_offset - offset;
619  if (s->slices[i][j].size < 2)
620  return AVERROR_INVALIDDATA;
621  offset = next_offset;
622  }
623 
624  s->slices[i][j].start = offset + header_size;
625  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
626  }
627 
628  if (bytestream2_get_byteu(&gb) != s->planes)
629  return AVERROR_INVALIDDATA;
630 
631  bytestream2_skipu(&gb, s->nb_slices * s->planes);
632 
633  table_size = header_size + first_offset - bytestream2_tell(&gb);
634  if (table_size < 2)
635  return AVERROR_INVALIDDATA;
636 
637  ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
638  table_size, s->max);
639  if (ret < 0)
640  return ret;
641 
643  p->key_frame = 1;
644 
645  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
646  return ret;
647 
648  s->buf = avpkt->data;
649  s->p = p;
650  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
651 
652  if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
653  avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
654  avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
655  avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
656  avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
657  avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
658  FFSWAP(uint8_t*, p->data[0], p->data[1]);
659  FFSWAP(int, p->linesize[0], p->linesize[1]);
660  } else {
661  switch (s->color_matrix) {
662  case 1:
664  break;
665  case 2:
667  break;
668  }
670  }
671 
672  *got_frame = 1;
673 
674  return avpkt->size;
675 }
676 
678 {
679  MagicYUVContext *s = avctx->priv_data;
681  return 0;
682 }
683 
685 {
686  MagicYUVContext * const s = avctx->priv_data;
687  int i;
688 
689  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
690  av_freep(&s->slices[i]);
691  s->slices_size[i] = 0;
692  ff_free_vlc(&s->vlc[i]);
693  }
694 
695  return 0;
696 }
697 
699  .name = "magicyuv",
700  .long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
701  .type = AVMEDIA_TYPE_VIDEO,
702  .id = AV_CODEC_ID_MAGICYUV,
703  .priv_data_size = sizeof(MagicYUVContext),
705  .close = magy_decode_end,
707  .capabilities = AV_CODEC_CAP_DR1 |
710  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
711 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
#define NULL
Definition: coverity.c:32
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
version
Definition: libkvazaar.c:317
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:419
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
#define avpriv_request_sample(...)
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int size
Definition: packet.h:364
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
unsigned int slices_size[4]
Definition: magicyuv.c:68
static int build_huffman(AVCodecContext *avctx, const uint8_t *table, int table_size, int max)
Definition: magicyuv.c:381
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
LLVidDSPContext llviddsp
Definition: magicyuv.c:72
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:380
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
static int huff_build(HuffEntry he[], uint16_t codes_count[33], VLC *vlc, int nb_elems)
Definition: magicyuv.c:75
uint8_t
#define av_cold
Definition: attributes.h:88
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Multithreading support functions.
Prediction
Definition: magicyuv.c:42
#define height
uint8_t * data
Definition: packet.h:363
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
bitstream reader API header.
int hshift[4]
Definition: magicyuv.c:65
#define max(a, b)
Definition: cuda_runtime.h:33
uint16_t code
Definition: magicyuv.c:50
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
void(* add_gradient_pred)(uint8_t *src, const ptrdiff_t stride, const ptrdiff_t width)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
static int magy_decode_slice(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:250
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int slice_height
Definition: magicyuv.c:57
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const char * r
Definition: vf_curves.c:114
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
#define FFMAX(a, b)
Definition: common.h:94
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
Slice * slices[4]
Definition: magicyuv.c:67
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames.The frames must then be freed with ff_thread_release_buffer().Otherwise decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
#define b
Definition: input.c:41
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
int vshift[4]
Definition: magicyuv.c:66
#define FFMIN(a, b)
Definition: common.h:96
uint8_t interlaced
Definition: mxfenc.c:2168
#define width
uint8_t w
Definition: llviddspenc.c:38
Definition: magicyuv.c:37
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
Definition: bytestream.h:363
#define FF_ARRAY_ELEMS(a)
Full range content.
Definition: pixfmt.h:586
int bits
Definition: vlc.h:27
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
static av_cold int magy_decode_init(AVCodecContext *avctx)
Definition: magicyuv.c:677
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1845
int(* add_left_pred_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, ptrdiff_t w, unsigned left)
#define src1
Definition: h264pred.c:139
uint8_t len
Definition: magicyuv.c:49
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
main external API structure.
Definition: avcodec.h:526
static int magy_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: magicyuv.c:434
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
uint32_t size
Definition: magicyuv.c:39
void ff_llviddsp_init(LLVidDSPContext *c)
uint32_t start
Definition: magicyuv.c:38
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
int coded_height
Definition: avcodec.h:714
AVCodec ff_magicyuv_decoder
Definition: magicyuv.c:698
Definition: magicyuv.c:43
#define mid_pred
Definition: mathops.h:97
VLC vlc[4]
Definition: magicyuv.c:69
AVFrame * p
Definition: magicyuv.c:54
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:118
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
int(* magy_decode_slice)(AVCodecContext *avctx, void *tdata, int j, int threadnr)
Definition: magicyuv.c:70
#define flags(name, subs,...)
Definition: cbs_av1.c:560
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
Narrow or limited range content.
Definition: pixfmt.h:569
static av_cold int magy_decode_end(AVCodecContext *avctx)
Definition: magicyuv.c:684
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
common internal api header.
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
const uint8_t * buf
Definition: magicyuv.c:64
unsigned bps
Definition: movenc.c:1598
void * priv_data
Definition: avcodec.h:553
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1, const uint16_t *diff, intptr_t w, int *left, int *left_top, int max)
Definition: magicyuv.c:97
#define av_freep(p)
#define FFSWAP(type, a, b)
Definition: common.h:99
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
void(* add_bytes)(uint8_t *dst, uint8_t *src, ptrdiff_t w)
#define MKTAG(a, b, c, d)
Definition: common.h:406
This structure stores compressed data.
Definition: packet.h:340
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:358
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
int color_matrix
Definition: magicyuv.c:61