FFmpeg
magicyuv.c
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/pixdesc.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "decode.h"
33 #include "get_bits.h"
34 #include "lossless_videodsp.h"
35 #include "thread.h"
36 
37 typedef struct Slice {
38  uint32_t start;
39  uint32_t size;
40 } Slice;
41 
42 typedef enum Prediction {
43  LEFT = 1,
44  GRADIENT = 2,
45  MEDIAN = 3,
46 } Prediction;
47 
48 typedef struct HuffEntry {
49  uint8_t len;
50  uint16_t sym;
51 } HuffEntry;
52 
53 typedef struct MagicYUVContext {
54  AVFrame *p;
55  int max;
56  int bps;
57  int slice_height;
58  int nb_slices;
59  int planes; // number of encoded planes in bitstream
60  int decorrelate; // postprocessing work
61  int color_matrix; // video color matrix
62  int flags;
63  int interlaced; // video is interlaced
64  const uint8_t *buf; // pointer to AVPacket->data
65  int hshift[4];
66  int vshift[4];
67  Slice *slices[4]; // slice bitstream positions for each plane
68  unsigned int slices_size[4]; // slice sizes for each plane
69  VLC vlc[4]; // VLC for each plane
70  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
71  int j, int threadnr);
72  LLVidDSPContext llviddsp;
73 } MagicYUVContext;
74 
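/*
 * huff_build(): builds a canonical Huffman table from the per-symbol code
 * lengths collected by build_huffman(). codes_pos[] arrives as a histogram of
 * code lengths; the first loop turns it into cumulative counts and the second
 * buckets the symbols so that longer codes come first (ascending symbol index
 * within a length). he[0] therefore holds the longest code, whose length,
 * capped at 12 bits, sizes the primary table for ff_init_vlc_from_lengths().
 */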
75 static int huff_build(const uint8_t len[], uint16_t codes_pos[33],
76  VLC *vlc, int nb_elems, void *logctx)
77 {
78  HuffEntry he[4096];
79 
80  for (int i = 31; i > 0; i--)
81  codes_pos[i] += codes_pos[i + 1];
82 
83  for (unsigned i = nb_elems; i-- > 0;)
84  he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };
85 
86  ff_free_vlc(vlc);
87  return ff_init_vlc_from_lengths(vlc, FFMIN(he[0].len, 12), nb_elems,
88  &he[0].len, sizeof(he[0]),
89  &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
90  0, 0, logctx);
91 }
92 
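/*
 * Median predictor for the 10/12-bit path: each output sample is
 * median(left, top, left + top - topleft) plus the decoded residual, masked
 * to the plane's bit depth (max == (1 << bps) - 1). The 8-bit decoder uses
 * the equivalent llviddsp add_median_pred() routine instead.
 */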
93 static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
94  const uint16_t *diff, intptr_t w,
95  int *left, int *left_top, int max)
96 {
97  int i;
98  uint16_t l, lt;
99 
100  l = *left;
101  lt = *left_top;
102 
103  for (i = 0; i < w; i++) {
104  l = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
105  l &= max;
106  lt = src1[i];
107  dst[i] = l;
108  }
109 
110  *left = l;
111  *left_top = lt;
112 }
113 
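/*
 * Slice decoder for 10/12-bit formats, run once per slice by execute2().
 * Every plane's slice begins with two bytes: 'flags' (bit 0 set selects raw
 * fixed-width samples instead of VLC-coded ones) and 'pred', the Prediction
 * mode to undo afterwards. For interlaced content the first two rows are
 * left-predicted and the vertical reference is two rows up (fake_stride).
 * For GBR formats the G plane is finally added back into R and B
 * (s->decorrelate).
 */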
114 static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
115  int j, int threadnr)
116 {
117  const MagicYUVContext *s = avctx->priv_data;
118  int interlaced = s->interlaced;
119  const int bps = s->bps;
120  const int max = s->max - 1;
121  AVFrame *p = s->p;
122  int i, k, x;
123  GetBitContext gb;
124  uint16_t *dst;
125 
126  for (i = 0; i < s->planes; i++) {
127  int left, lefttop, top;
128  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
129  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
130  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
131  ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
132  ptrdiff_t stride = p->linesize[i] / 2;
133  int flags, pred;
134  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
135  s->slices[i][j].size);
136 
137  if (ret < 0)
138  return ret;
139 
140  flags = get_bits(&gb, 8);
141  pred = get_bits(&gb, 8);
142 
143  dst = (uint16_t *)p->data[i] + j * sheight * stride;
144  if (flags & 1) {
145  if (get_bits_left(&gb) < bps * width * height)
146  return AVERROR_INVALIDDATA;
147  for (k = 0; k < height; k++) {
148  for (x = 0; x < width; x++)
149  dst[x] = get_bits(&gb, bps);
150 
151  dst += stride;
152  }
153  } else {
154  for (k = 0; k < height; k++) {
155  for (x = 0; x < width; x++) {
156  int pix;
157  if (get_bits_left(&gb) <= 0)
158  return AVERROR_INVALIDDATA;
159 
160  pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
161  if (pix < 0)
162  return AVERROR_INVALIDDATA;
163 
164  dst[x] = pix;
165  }
166  dst += stride;
167  }
168  }
169 
170  switch (pred) {
171  case LEFT:
172  dst = (uint16_t *)p->data[i] + j * sheight * stride;
173  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
174  dst += stride;
175  if (interlaced) {
176  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
177  dst += stride;
178  }
179  for (k = 1 + interlaced; k < height; k++) {
180  s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
181  dst += stride;
182  }
183  break;
184  case GRADIENT:
185  dst = (uint16_t *)p->data[i] + j * sheight * stride;
186  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
187  dst += stride;
188  if (interlaced) {
189  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
190  dst += stride;
191  }
192  for (k = 1 + interlaced; k < height; k++) {
193  top = dst[-fake_stride];
194  left = top + dst[0];
195  dst[0] = left & max;
196  for (x = 1; x < width; x++) {
197  top = dst[x - fake_stride];
198  lefttop = dst[x - (fake_stride + 1)];
199  left += top - lefttop + dst[x];
200  dst[x] = left & max;
201  }
202  dst += stride;
203  }
204  break;
205  case MEDIAN:
206  dst = (uint16_t *)p->data[i] + j * sheight * stride;
207  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
208  dst += stride;
209  if (interlaced) {
210  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
211  dst += stride;
212  }
213  lefttop = left = dst[0];
214  for (k = 1 + interlaced; k < height; k++) {
215  magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
216  lefttop = left = dst[0];
217  dst += stride;
218  }
219  break;
220  default:
221  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
222  }
223  }
224 
225  if (s->decorrelate) {
226  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
227  int width = avctx->coded_width;
228  uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
229  uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
230  uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
231 
232  for (i = 0; i < height; i++) {
233  for (k = 0; k < width; k++) {
234  b[k] = (b[k] + g[k]) & max;
235  r[k] = (r[k] + g[k]) & max;
236  }
237  b += p->linesize[0] / 2;
238  g += p->linesize[1] / 2;
239  r += p->linesize[2] / 2;
240  }
241  }
242 
243  return 0;
244 }
245 
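/*
 * 8-bit counterpart of magy_decode_slice10(). The slice layout is identical,
 * but prediction is undone with the LLVidDSP byte helpers (add_left_pred,
 * add_gradient_pred, add_median_pred); the gradient case computes the first
 * FFMIN(width, 32) columns in C before handing the rest of the row to
 * add_gradient_pred() (see the "dsp need aligned 32" note below).
 */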
246 static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
247  int j, int threadnr)
248 {
249  const MagicYUVContext *s = avctx->priv_data;
250  int interlaced = s->interlaced;
251  AVFrame *p = s->p;
252  int i, k, x, min_width;
253  GetBitContext gb;
254  uint8_t *dst;
255 
256  for (i = 0; i < s->planes; i++) {
257  int left, lefttop, top;
258  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
259  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
260  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
261  ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
262  ptrdiff_t stride = p->linesize[i];
263  const uint8_t *slice = s->buf + s->slices[i][j].start;
264  int flags, pred;
265 
266  flags = bytestream_get_byte(&slice);
267  pred = bytestream_get_byte(&slice);
268 
269  dst = p->data[i] + j * sheight * stride;
270  if (flags & 1) {
271  if (s->slices[i][j].size - 2 < width * height)
272  return AVERROR_INVALIDDATA;
273  for (k = 0; k < height; k++) {
274  bytestream_get_buffer(&slice, dst, width);
275  dst += stride;
276  }
277  } else {
278  int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);
279 
280  if (ret < 0)
281  return ret;
282 
283  for (k = 0; k < height; k++) {
284  for (x = 0; x < width; x++) {
285  int pix;
286  if (get_bits_left(&gb) <= 0)
287  return AVERROR_INVALIDDATA;
288 
289  pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
290  if (pix < 0)
291  return AVERROR_INVALIDDATA;
292 
293  dst[x] = pix;
294  }
295  dst += stride;
296  }
297  }
298 
299  switch (pred) {
300  case LEFT:
301  dst = p->data[i] + j * sheight * stride;
302  s->llviddsp.add_left_pred(dst, dst, width, 0);
303  dst += stride;
304  if (interlaced) {
305  s->llviddsp.add_left_pred(dst, dst, width, 0);
306  dst += stride;
307  }
308  for (k = 1 + interlaced; k < height; k++) {
309  s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
310  dst += stride;
311  }
312  break;
313  case GRADIENT:
314  dst = p->data[i] + j * sheight * stride;
315  s->llviddsp.add_left_pred(dst, dst, width, 0);
316  dst += stride;
317  if (interlaced) {
318  s->llviddsp.add_left_pred(dst, dst, width, 0);
319  dst += stride;
320  }
321  min_width = FFMIN(width, 32);
322  for (k = 1 + interlaced; k < height; k++) {
323  top = dst[-fake_stride];
324  left = top + dst[0];
325  dst[0] = left;
326  for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
327  top = dst[x - fake_stride];
328  lefttop = dst[x - (fake_stride + 1)];
329  left += top - lefttop + dst[x];
330  dst[x] = left;
331  }
332  if (width > 32)
333  s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
334  dst += stride;
335  }
336  break;
337  case MEDIAN:
338  dst = p->data[i] + j * sheight * stride;
339  s->llviddsp.add_left_pred(dst, dst, width, 0);
340  dst += stride;
341  if (interlaced) {
342  s->llviddsp.add_left_pred(dst, dst, width, 0);
343  dst += stride;
344  }
345  lefttop = left = dst[0];
346  for (k = 1 + interlaced; k < height; k++) {
347  s->llviddsp.add_median_pred(dst, dst - fake_stride,
348  dst, width, &left, &lefttop);
349  lefttop = left = dst[0];
350  dst += stride;
351  }
352  break;
353  default:
354  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
355  }
356  }
357 
358  if (s->decorrelate) {
359  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
360  int width = avctx->coded_width;
361  uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
362  uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
363  uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];
364 
365  for (i = 0; i < height; i++) {
366  s->llviddsp.add_bytes(b, g, width);
367  s->llviddsp.add_bytes(r, g, width);
368  b += p->linesize[0];
369  g += p->linesize[1];
370  r += p->linesize[2];
371  }
372  }
373 
374  return 0;
375 }
376 
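/*
 * Parses the run-length coded table of Huffman code lengths stored in the
 * packet header. Each byte carries a code length (1..32) in its low 7 bits;
 * if the top bit is set, the following byte extends the run. One table of
 * 'max' lengths (256, 1024 or 4096 symbols, i.e. 1 << bps) is read per coded
 * plane and handed to huff_build().
 */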
377 static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
378  int table_size, int max)
379 {
380  MagicYUVContext *s = avctx->priv_data;
381  GetByteContext gb;
382  uint8_t len[4096];
383  uint16_t length_count[33] = { 0 };
384  int i = 0, j = 0, k;
385 
386  bytestream2_init(&gb, table, table_size);
387 
388  while (bytestream2_get_bytes_left(&gb) > 0) {
389  int b = bytestream2_peek_byteu(&gb) & 0x80;
390  int x = bytestream2_get_byteu(&gb) & ~0x80;
391  int l = 1;
392 
393  if (b) {
394  if (bytestream2_get_bytes_left(&gb) <= 0)
395  break;
396  l += bytestream2_get_byteu(&gb);
397  }
398  k = j + l;
399  if (k > max || x == 0 || x > 32) {
400  av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
401  return AVERROR_INVALIDDATA;
402  }
403 
404  length_count[x] += l;
405  for (; j < k; j++)
406  len[j] = x;
407 
408  if (j == max) {
409  j = 0;
410  if (huff_build(len, length_count, &s->vlc[i], max, avctx)) {
411  av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
412  return AVERROR_INVALIDDATA;
413  }
414  i++;
415  if (i == s->planes) {
416  break;
417  }
418  memset(length_count, 0, sizeof(length_count));
419  }
420  }
421 
422  if (i != s->planes) {
423  av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
424  return AVERROR_INVALIDDATA;
425  }
426 
427  return 0;
428 }
429 
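/*
 * Frame decoding entry point. A packet starts with the 'MAGY' tag, the header
 * size and a version byte (only version 7 is supported), followed by a format
 * byte selecting the pixel format and bit depth, a colour-matrix byte, a
 * flags byte (0x2 = interlaced, 0x4 = full range), the coded dimensions and
 * the slice dimensions. Per-plane slice offset tables and the Huffman length
 * tables follow; the slices themselves are decoded in parallel through
 * avctx->execute2().
 */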
430 static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p,
431  int *got_frame, AVPacket *avpkt)
432 {
433  MagicYUVContext *s = avctx->priv_data;
434  GetByteContext gb;
435  uint32_t first_offset, offset, next_offset, header_size, slice_width;
436  int width, height, format, version, table_size;
437  int ret, i, j;
438 
439  if (avpkt->size < 36)
440  return AVERROR_INVALIDDATA;
441 
442  bytestream2_init(&gb, avpkt->data, avpkt->size);
443  if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
444  return AVERROR_INVALIDDATA;
445 
446  header_size = bytestream2_get_le32u(&gb);
447  if (header_size < 32 || header_size >= avpkt->size) {
448  av_log(avctx, AV_LOG_ERROR,
449  "header or packet too small %"PRIu32"\n", header_size);
450  return AVERROR_INVALIDDATA;
451  }
452 
453  version = bytestream2_get_byteu(&gb);
454  if (version != 7) {
455  avpriv_request_sample(avctx, "Version %d", version);
456  return AVERROR_PATCHWELCOME;
457  }
458 
459  s->hshift[1] =
460  s->vshift[1] =
461  s->hshift[2] =
462  s->vshift[2] = 0;
463  s->decorrelate = 0;
464  s->bps = 8;
465 
466  format = bytestream2_get_byteu(&gb);
467  switch (format) {
468  case 0x65:
469  avctx->pix_fmt = AV_PIX_FMT_GBRP;
470  s->decorrelate = 1;
471  break;
472  case 0x66:
473  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
474  s->decorrelate = 1;
475  break;
476  case 0x67:
477  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
478  break;
479  case 0x68:
480  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
481  s->hshift[1] =
482  s->hshift[2] = 1;
483  break;
484  case 0x69:
485  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
486  s->hshift[1] =
487  s->vshift[1] =
488  s->hshift[2] =
489  s->vshift[2] = 1;
490  break;
491  case 0x6a:
492  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
493  break;
494  case 0x6b:
495  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
496  break;
497  case 0x6c:
498  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
499  s->hshift[1] =
500  s->hshift[2] = 1;
501  s->bps = 10;
502  break;
503  case 0x76:
504  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
505  s->bps = 10;
506  break;
507  case 0x6d:
508  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
509  s->decorrelate = 1;
510  s->bps = 10;
511  break;
512  case 0x6e:
513  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
514  s->decorrelate = 1;
515  s->bps = 10;
516  break;
517  case 0x6f:
518  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
519  s->decorrelate = 1;
520  s->bps = 12;
521  break;
522  case 0x70:
523  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
524  s->decorrelate = 1;
525  s->bps = 12;
526  break;
527  case 0x73:
528  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
529  s->bps = 10;
530  break;
531  case 0x7b:
532  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
533  s->hshift[1] =
534  s->vshift[1] =
535  s->hshift[2] =
536  s->vshift[2] = 1;
537  s->bps = 10;
538  break;
539  default:
540  avpriv_request_sample(avctx, "Format 0x%X", format);
541  return AVERROR_PATCHWELCOME;
542  }
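/* Derived from the format byte: 'max' is the symbol count per Huffman table
 * (1 << bps), and the 8-bit or 16-bit slice decoder is selected to match. */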
543  s->max = 1 << s->bps;
544  s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
545  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
546 
547  bytestream2_skipu(&gb, 1);
548  s->color_matrix = bytestream2_get_byteu(&gb);
549  s->flags = bytestream2_get_byteu(&gb);
550  s->interlaced = !!(s->flags & 2);
551  bytestream2_skipu(&gb, 3);
552 
553  width = bytestream2_get_le32u(&gb);
554  height = bytestream2_get_le32u(&gb);
555  ret = ff_set_dimensions(avctx, width, height);
556  if (ret < 0)
557  return ret;
558 
559  slice_width = bytestream2_get_le32u(&gb);
560  if (slice_width != avctx->coded_width) {
561  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
562  return AVERROR_PATCHWELCOME;
563  }
564  s->slice_height = bytestream2_get_le32u(&gb);
565  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
566  av_log(avctx, AV_LOG_ERROR,
567  "invalid slice height: %d\n", s->slice_height);
568  return AVERROR_INVALIDDATA;
569  }
570 
571  bytestream2_skipu(&gb, 4);
572 
573  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
574  if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
575  av_log(avctx, AV_LOG_ERROR,
576  "invalid number of slices: %d\n", s->nb_slices);
577  return AVERROR_INVALIDDATA;
578  }
579 
580  if (s->interlaced) {
581  if ((s->slice_height >> s->vshift[1]) < 2) {
582  av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
583  return AVERROR_INVALIDDATA;
584  }
585  if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
586  av_log(avctx, AV_LOG_ERROR, "impossible height\n");
587  return AVERROR_INVALIDDATA;
588  }
589  }
590 
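/* Each plane stores one 32-bit offset per slice, relative to the end of the
 * header; consecutive offsets give the slice sizes, with the last slice
 * running to the end of the packet. Every slice needs at least 2 bytes for
 * the flags and prediction-mode bytes. */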
591  if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
592  return AVERROR_INVALIDDATA;
593  for (i = 0; i < s->planes; i++) {
594  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
595  if (!s->slices[i])
596  return AVERROR(ENOMEM);
597 
598  offset = bytestream2_get_le32u(&gb);
599  if (offset >= avpkt->size - header_size)
600  return AVERROR_INVALIDDATA;
601 
602  if (i == 0)
603  first_offset = offset;
604 
605  for (j = 0; j < s->nb_slices - 1; j++) {
606  s->slices[i][j].start = offset + header_size;
607 
608  next_offset = bytestream2_get_le32u(&gb);
609  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
610  return AVERROR_INVALIDDATA;
611 
612  s->slices[i][j].size = next_offset - offset;
613  if (s->slices[i][j].size < 2)
614  return AVERROR_INVALIDDATA;
615  offset = next_offset;
616  }
617 
618  s->slices[i][j].start = offset + header_size;
619  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
620 
621  if (s->slices[i][j].size < 2)
622  return AVERROR_INVALIDDATA;
623  }
624 
625  if (bytestream2_get_byteu(&gb) != s->planes)
626  return AVERROR_INVALIDDATA;
627 
628  bytestream2_skipu(&gb, s->nb_slices * s->planes);
629 
630  table_size = header_size + first_offset - bytestream2_tell(&gb);
631  if (table_size < 2)
632  return AVERROR_INVALIDDATA;
633 
634  ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
635  table_size, s->max);
636  if (ret < 0)
637  return ret;
638 
639  p->pict_type = AV_PICTURE_TYPE_I;
640  p->key_frame = 1;
641 
642  if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
643  return ret;
644 
645  s->buf = avpkt->data;
646  s->p = p;
647  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
648 
649  if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
650  avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
651  avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
652  avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
653  avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
654  avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
655  FFSWAP(uint8_t*, p->data[0], p->data[1]);
656  FFSWAP(int, p->linesize[0], p->linesize[1]);
657  } else {
658  switch (s->color_matrix) {
659  case 1:
660  p->colorspace = AVCOL_SPC_BT470BG;
661  break;
662  case 2:
663  p->colorspace = AVCOL_SPC_BT709;
664  break;
665  }
666  p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
667  }
668 
669  *got_frame = 1;
670 
671  return avpkt->size;
672 }
673 
674 static av_cold int magy_decode_init(AVCodecContext *avctx)
675 {
676  MagicYUVContext *s = avctx->priv_data;
677  ff_llviddsp_init(&s->llviddsp);
678  return 0;
679 }
680 
681 static av_cold int magy_decode_end(AVCodecContext *avctx)
682 {
683  MagicYUVContext * const s = avctx->priv_data;
684  int i;
685 
686  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
687  av_freep(&s->slices[i]);
688  s->slices_size[i] = 0;
689  ff_free_vlc(&s->vlc[i]);
690  }
691 
692  return 0;
693 }
694 
695 const FFCodec ff_magicyuv_decoder = {
696  .p.name = "magicyuv",
697  CODEC_LONG_NAME("MagicYUV video"),
698  .p.type = AVMEDIA_TYPE_VIDEO,
699  .p.id = AV_CODEC_ID_MAGICYUV,
700  .priv_data_size = sizeof(MagicYUVContext),
701  .init = magy_decode_init,
702  .close = magy_decode_end,
703  FF_CODEC_DECODE_CB(magy_decode_frame),
704  .p.capabilities = AV_CODEC_CAP_DR1 |
705  AV_CODEC_CAP_FRAME_THREADS |
706  AV_CODEC_CAP_SLICE_THREADS,
707 };
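
For reference, a minimal sketch of how this decoder is reached through the public libavcodec API. It is not part of magicyuv.c; the helper name decode_one is illustrative and error handling is reduced to the essentials.

#include <libavcodec/avcodec.h>

/* Illustrative only: decode a single MagicYUV packet (the codec is
 * intra-only, so one packet yields one frame). */
static int decode_one(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MAGICYUV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;
    if ((ret = avcodec_send_packet(ctx, pkt)) < 0)
        goto end;
    ret = avcodec_receive_frame(ctx, frame);

end:
    avcodec_free_context(&ctx);
    return ret;
}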