FFmpeg
magicyuvenc.c
1 /*
2  * MagicYUV encoder
3  * Copyright (c) 2017 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #include "libavutil/opt.h"
26 #include "libavutil/pixdesc.h"
27 #include "libavutil/qsort.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "encode.h"
33 #include "put_bits.h"
34 #include "thread.h"
35 #include "lossless_videoencdsp.h"
36 
37 #define MAGICYUV_EXTRADATA_SIZE 32
38 
39 typedef enum Prediction {
40  LEFT = 1,
41  GRADIENT,
42  MEDIAN,
43 } Prediction;
44 
45 typedef struct HuffEntry {
46  uint8_t len;
47  uint32_t code;
48 } HuffEntry;
49 
50 typedef struct PTable {
51  int value; ///< input value
52  int64_t prob; ///< number of occurrences of this value in input
53 } PTable;
54 
55 typedef struct MagicYUVContext {
56  const AVClass *class;
57  int frame_pred;
58  PutBitContext pb;
59  int planes;
60  uint8_t format;
61  int slice_height;
62  int nb_slices;
63  int correlate;
64  int hshift[4];
65  int vshift[4];
66  uint8_t *slices[4];
67  unsigned slice_pos[4];
68  unsigned tables_size;
69  uint8_t *decorrelate_buf[2];
70  HuffEntry he[4][256];
71  LLVidEncDSPContext llvidencdsp;
72  void (*predict)(struct MagicYUVContext *s, const uint8_t *src, uint8_t *dst,
73  ptrdiff_t stride, int width, int height);
74 } MagicYUVContext;
75 
76 static void left_predict(MagicYUVContext *s,
77  const uint8_t *src, uint8_t *dst, ptrdiff_t stride,
78  int width, int height)
79 {
80  uint8_t prev = 0;
81  int i, j;
82 
83  for (i = 0; i < width; i++) {
84  dst[i] = src[i] - prev;
85  prev = src[i];
86  }
87  dst += width;
88  src += stride;
89  for (j = 1; j < height; j++) {
90  prev = src[-stride];
91  for (i = 0; i < width; i++) {
92  dst[i] = src[i] - prev;
93  prev = src[i];
94  }
95  dst += width;
96  src += stride;
97  }
98 }
99 
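left_predict() stores the first row as horizontal deltas and every later row as deltas against the previous source pixel, with each row seeded from the pixel directly above its first sample. A decoder-side sketch of the inverse (a hypothetical helper, not part of this file) makes the residual scheme explicit; it assumes dst is the reconstructed plane and src holds the width*height residuals produced above:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical inverse of left_predict(): rebuild pixels from residuals.
 * dst is the reconstructed plane (stride bytes per row), src holds the
 * width*height residuals produced by the encoder above. */
static void left_unpredict(const uint8_t *src, uint8_t *dst,
                           ptrdiff_t stride, int width, int height)
{
    uint8_t prev = 0;

    for (int i = 0; i < width; i++) {
        dst[i] = src[i] + prev;      /* undoes dst[i] = src[i] - prev */
        prev   = dst[i];
    }
    src += width;
    dst += stride;
    for (int j = 1; j < height; j++) {
        prev = dst[-stride];         /* row is seeded from the pixel above */
        for (int i = 0; i < width; i++) {
            dst[i] = src[i] + prev;
            prev   = dst[i];
        }
        src += width;
        dst += stride;
    }
}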
100 static void gradient_predict(MagicYUVContext *s,
101  const uint8_t *src, uint8_t *dst, ptrdiff_t stride,
102  int width, int height)
103 {
104  int left = 0, top, lefttop;
105  int i, j;
106 
107  for (i = 0; i < width; i++) {
108  dst[i] = src[i] - left;
109  left = src[i];
110  }
111  dst += width;
112  src += stride;
113  for (j = 1; j < height; j++) {
114  top = src[-stride];
115  left = src[0] - top;
116  dst[0] = left;
117  for (i = 1; i < width; i++) {
118  top = src[i - stride];
119  lefttop = src[i - (stride + 1)];
120  left = src[i-1];
121  dst[i] = (src[i] - top) - left + lefttop;
122  }
123  dst += width;
124  src += stride;
125  }
126 }
127 
128 static void median_predict(MagicYUVContext *s,
129  const uint8_t *src, uint8_t *dst, ptrdiff_t stride,
130  int width, int height)
131 {
132  int left = 0, lefttop;
133  int i, j;
134 
135  for (i = 0; i < width; i++) {
136  dst[i] = src[i] - left;
137  left = src[i];
138  }
139  dst += width;
140  src += stride;
141  for (j = 1; j < height; j++) {
142  left = lefttop = src[-stride];
143  s->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &left, &lefttop);
144  dst += width;
145  src += stride;
146  }
147 }
148 
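median_predict() delegates all rows after the first to the llvidencdsp routine sub_median_pred. A plain scalar sketch of what that call computes per pixel (an illustration of the median predictor, not FFmpeg's optimized implementation) looks roughly like this:

#include <stddef.h>
#include <stdint.h>

static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b */
    if (b > c) b = c;                        /* b = min(b, c) */
    if (a > b) b = a;                        /* b = max(a, b): the median */
    return b;
}

/* Scalar sketch of sub_median_pred(): each output byte is the source pixel
 * minus the median of its left, top and (left + top - topleft) neighbours;
 * left/lefttop carry state across calls, as in the DSP version. */
static void sub_median_pred_ref(uint8_t *dst, const uint8_t *top_row,
                                const uint8_t *src, ptrdiff_t w,
                                int *left, int *lefttop)
{
    int l = *left, tl = *lefttop;

    for (ptrdiff_t i = 0; i < w; i++) {
        int t  = top_row[i];
        dst[i] = src[i] - mid_pred3(l, t, l + t - tl);
        l      = src[i];
        tl     = t;
    }
    *left    = l;
    *lefttop = tl;
}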
149 static av_cold int magy_encode_init(AVCodecContext *avctx)
150 {
151  MagicYUVContext *s = avctx->priv_data;
152  PutByteContext pb;
153  int i;
154 
155  switch (avctx->pix_fmt) {
156  case AV_PIX_FMT_GBRP:
157  avctx->codec_tag = MKTAG('M', '8', 'R', 'G');
158  s->correlate = 1;
159  s->format = 0x65;
160  break;
161  case AV_PIX_FMT_GBRAP:
162  avctx->codec_tag = MKTAG('M', '8', 'R', 'A');
163  s->correlate = 1;
164  s->format = 0x66;
165  break;
166  case AV_PIX_FMT_YUV420P:
167  avctx->codec_tag = MKTAG('M', '8', 'Y', '0');
168  s->hshift[1] =
169  s->vshift[1] =
170  s->hshift[2] =
171  s->vshift[2] = 1;
172  s->format = 0x69;
173  break;
174  case AV_PIX_FMT_YUV422P:
175  avctx->codec_tag = MKTAG('M', '8', 'Y', '2');
176  s->hshift[1] =
177  s->hshift[2] = 1;
178  s->format = 0x68;
179  break;
180  case AV_PIX_FMT_YUV444P:
181  avctx->codec_tag = MKTAG('M', '8', 'Y', '4');
182  s->format = 0x67;
183  break;
184  case AV_PIX_FMT_YUVA444P:
185  avctx->codec_tag = MKTAG('M', '8', 'Y', 'A');
186  s->format = 0x6a;
187  break;
188  case AV_PIX_FMT_GRAY8:
189  avctx->codec_tag = MKTAG('M', '8', 'G', '0');
190  s->format = 0x6b;
191  break;
192  }
193  if (s->correlate) {
194  s->decorrelate_buf[0] = av_calloc(2U * avctx->height, FFALIGN(avctx->width, 16));
195  if (!s->decorrelate_buf[0])
196  return AVERROR(ENOMEM);
197  s->decorrelate_buf[1] = s->decorrelate_buf[0] + avctx->height * FFALIGN(avctx->width, 16);
198  }
199 
200  ff_llvidencdsp_init(&s->llvidencdsp);
201 
202  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
203 
204  s->nb_slices = 1;
205 
206  for (i = 0; i < s->planes; i++) {
207  s->slices[i] = av_malloc(avctx->width * (avctx->height + 2) +
208  AV_INPUT_BUFFER_PADDING_SIZE);
209  if (!s->slices[i]) {
210  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer.\n");
211  return AVERROR(ENOMEM);
212  }
213  }
214 
215  switch (s->frame_pred) {
216  case LEFT: s->predict = left_predict; break;
217  case GRADIENT: s->predict = gradient_predict; break;
218  case MEDIAN: s->predict = median_predict; break;
219  }
220 
221  avctx->extradata_size = MAGICYUV_EXTRADATA_SIZE;
222 
223  avctx->extradata = av_mallocz(avctx->extradata_size +
224  AV_INPUT_BUFFER_PADDING_SIZE);
225 
226  if (!avctx->extradata) {
227  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
228  return AVERROR(ENOMEM);
229  }
230 
231  bytestream2_init_writer(&pb, avctx->extradata, MAGICYUV_EXTRADATA_SIZE);
232  bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
233  bytestream2_put_le32(&pb, 32);
234  bytestream2_put_byte(&pb, 7);
235  bytestream2_put_byte(&pb, s->format);
236  bytestream2_put_byte(&pb, 12);
237  bytestream2_put_byte(&pb, 0);
238 
239  bytestream2_put_byte(&pb, 0);
240  bytestream2_put_byte(&pb, 0);
241  bytestream2_put_byte(&pb, 32);
242  bytestream2_put_byte(&pb, 0);
243 
244  bytestream2_put_le32(&pb, avctx->width);
245  bytestream2_put_le32(&pb, avctx->height);
246  bytestream2_put_le32(&pb, avctx->width);
247  bytestream2_put_le32(&pb, avctx->height);
248 
249  return 0;
250 }
251 
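magy_encode_init() fills the 32-byte extradata with the same fields the per-frame header carries: the MAGY magic, a header size of 32, version 7, the format byte chosen above, the maximum Huffman code length (12), the coder type (32) and the dimensions written twice. A hedged sketch of reading those bytes back, handy when inspecting the produced extradata (the struct and helpers are illustrative, not FFmpeg API):

#include <stdint.h>
#include <string.h>

/* Illustrative view of the 32 bytes written above; field names follow the
 * comments in magy_encode_frame(). All multi-byte values are little-endian. */
typedef struct MagyHeader {
    uint32_t magic;            /* 'MAGY' */
    uint32_t header_size;      /* 32 */
    uint8_t  version;          /* 7 */
    uint8_t  format;           /* 0x65..0x6b, see the switch above */
    uint8_t  max_huff_len;     /* 12 */
    uint8_t  coder_type;       /* 32 */
    uint32_t width, height;
    uint32_t width2, height2;  /* the dimensions are written a second time */
} MagyHeader;

static uint32_t rd_le32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int parse_magy_header(const uint8_t *buf, int size, MagyHeader *out)
{
    if (size < 32 || memcmp(buf, "MAGY", 4))
        return -1;
    memset(out, 0, sizeof(*out));
    out->magic        = rd_le32(buf);
    out->header_size  = rd_le32(buf + 4);
    out->version      = buf[8];
    out->format       = buf[9];
    out->max_huff_len = buf[10];
    out->coder_type   = buf[14];
    out->width        = rd_le32(buf + 16);
    out->height       = rd_le32(buf + 20);
    out->width2       = rd_le32(buf + 24);
    out->height2      = rd_le32(buf + 28);
    return 0;
}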
252 static void calculate_codes(HuffEntry *he, uint16_t codes_count[33])
253 {
254  for (unsigned i = 32, nb_codes = 0; i > 0; i--) {
255  uint16_t curr = codes_count[i]; // # of leafs of length i
256  codes_count[i] = nb_codes / 2; // # of non-leaf nodes on level i
257  nb_codes = codes_count[i] + curr; // # of nodes on level i
258  }
259 
260  for (unsigned i = 0; i < 256; i++) {
261  he[i].code = codes_count[he[i].len];
262  codes_count[he[i].len]++;
263  }
264 }
265 
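calculate_codes() first converts the per-length leaf counts into the starting code of each length (walking from the longest length down), then hands out consecutive code values per symbol. A standalone toy run of the same assignment for four symbols with lengths {1, 2, 3, 3} (illustrative only; the real call works on 256 symbols and lengths up to 32):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  len[4]          = { 1, 2, 3, 3 };
    uint32_t code[4];
    uint16_t codes_count[33] = { 0 };

    for (int i = 0; i < 4; i++)
        codes_count[len[i]]++;

    /* same two passes as calculate_codes() above */
    for (unsigned i = 32, nb_codes = 0; i > 0; i--) {
        uint16_t curr  = codes_count[i];
        codes_count[i] = nb_codes / 2;
        nb_codes       = codes_count[i] + curr;
    }
    for (int i = 0; i < 4; i++)
        code[i] = codes_count[len[i]]++;

    /* prints len/code pairs 1/1, 2/1, 3/0, 3/1, i.e. "1", "01", "000", "001" */
    for (int i = 0; i < 4; i++)
        printf("symbol %d: len %d code %u\n", i, len[i], (unsigned)code[i]);
    return 0;
}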
266 static void count_usage(uint8_t *src, int width,
267  int height, PTable *counts)
268 {
269  int i, j;
270 
271  for (j = 0; j < height; j++) {
272  for (i = 0; i < width; i++) {
273  counts[src[i]].prob++;
274  }
275  src += width;
276  }
277 }
278 
279 typedef struct PackageMergerList {
280  int nitems; ///< number of items in the list and probability ex. 4
281  int item_idx[515]; ///< index range for each item in items 0, 2, 5, 9, 13
282  int probability[514]; ///< probability of each item 3, 8, 18, 46
283  int items[257 * 16]; ///< chain of all individual values that make up items A, B, A, B, C, A, B, C, D, C, D, D, E
284 } PackageMergerList;
285 
286 static int compare_by_prob(const void *a, const void *b)
287 {
288  const PTable *a2 = a;
289  const PTable *b2 = b;
290  return a2->prob - b2->prob;
291 }
292 
293 static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts,
294  uint16_t codes_counts[33],
295  int size, int max_length)
296 {
297  PackageMergerList list_a, list_b, *to = &list_a, *from = &list_b, *temp;
298  int times, i, j, k;
299  int nbits[257] = {0};
300  int min;
301 
302  av_assert0(max_length > 0);
303 
304  to->nitems = 0;
305  from->nitems = 0;
306  to->item_idx[0] = 0;
307  from->item_idx[0] = 0;
308  AV_QSORT(prob_table, size, PTable, compare_by_prob);
309 
310  for (times = 0; times <= max_length; times++) {
311  to->nitems = 0;
312  to->item_idx[0] = 0;
313 
314  j = 0;
315  k = 0;
316 
317  if (times < max_length) {
318  i = 0;
319  }
320  while (i < size || j + 1 < from->nitems) {
321  to->nitems++;
322  to->item_idx[to->nitems] = to->item_idx[to->nitems - 1];
323  if (i < size &&
324  (j + 1 >= from->nitems ||
325  prob_table[i].prob <
326  from->probability[j] + from->probability[j + 1])) {
327  to->items[to->item_idx[to->nitems]++] = prob_table[i].value;
328  to->probability[to->nitems - 1] = prob_table[i].prob;
329  i++;
330  } else {
331  for (k = from->item_idx[j]; k < from->item_idx[j + 2]; k++) {
332  to->items[to->item_idx[to->nitems]++] = from->items[k];
333  }
334  to->probability[to->nitems - 1] =
335  from->probability[j] + from->probability[j + 1];
336  j += 2;
337  }
338  }
339  temp = to;
340  to = from;
341  from = temp;
342  }
343 
344  min = (size - 1 < from->nitems) ? size - 1 : from->nitems;
345  for (i = 0; i < from->item_idx[min]; i++) {
346  nbits[from->items[i]]++;
347  }
348 
349  for (i = 0; i < size; i++) {
350  distincts[i].len = nbits[i];
351  codes_counts[nbits[i]]++;
352  }
353 }
354 
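magy_huffman_compute_bits() is a package-merge style routine that yields code lengths capped at max_length (12 here, matching the "max huffman length" byte written into the headers). A quick sanity check on its output, sketched below as a hypothetical helper, is that the lengths satisfy the Kraft inequality, so a prefix code with exactly those lengths exists:

#include <stdint.h>

/* Returns 1 if a prefix code with the given lengths can exist: every length
 * is in [1, max_len] and sum(2^(max_len - len[i])) <= 2^max_len (Kraft). */
static int lengths_are_valid(const uint8_t *len, int size, int max_len)
{
    uint64_t sum = 0;

    for (int i = 0; i < size; i++) {
        if (len[i] == 0 || len[i] > max_len)
            return 0;
        sum += 1ULL << (max_len - len[i]);
    }
    return sum <= (1ULL << max_len);
}

With size = 256 and max_len = 12 this mirrors the constraints encode_table() relies on.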
355 static int encode_table(AVCodecContext *avctx, uint8_t *dst,
356  int width, int height,
357  PutBitContext *pb, HuffEntry *he)
358 {
359  PTable counts[256] = { {0} };
360  uint16_t codes_counts[33] = { 0 };
361  int i;
362 
363  count_usage(dst, width, height, counts);
364 
365  for (i = 0; i < 256; i++) {
366  counts[i].prob++;
367  counts[i].value = i;
368  }
369 
370  magy_huffman_compute_bits(counts, he, codes_counts, 256, 12);
371 
372  calculate_codes(he, codes_counts);
373 
374  for (i = 0; i < 256; i++) {
375  put_bits(pb, 1, 0);
376  put_bits(pb, 7, he[i].len);
377  }
378 
379  return 0;
380 }
381 
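encode_table() transmits the tree as 256 fixed-size entries, each a flag bit (always written as 0 by this encoder) followed by a 7-bit code length, so one plane's table occupies exactly 256 bytes and every entry stays byte-aligned. Under that assumption the lengths can be read back trivially (a sketch; a real MagicYUV decoder parses this region bit by bit rather than assuming this fixed layout):

#include <stdint.h>

/* Sketch: recover the 256 code lengths of one plane from the table region
 * written by encode_table(). With the form emitted above, each entry is one
 * byte: the MSB is the flag bit (0), the low 7 bits are the code length. */
static void read_plane_lengths(const uint8_t *table, uint8_t lens[256])
{
    for (int i = 0; i < 256; i++)
        lens[i] = table[i] & 0x7f;   /* table[i] >> 7 would be the flag bit */
}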
382 static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size,
383  int width, int height, HuffEntry *he, int prediction)
384 {
385  PutBitContext pb;
386  int i, j;
387  int count;
388 
389  init_put_bits(&pb, dst, dst_size);
390 
391  put_bits(&pb, 8, 0);
392  put_bits(&pb, 8, prediction);
393 
394  for (j = 0; j < height; j++) {
395  for (i = 0; i < width; i++) {
396  const int idx = src[i];
397  put_bits(&pb, he[idx].len, he[idx].code);
398  }
399 
400  src += width;
401  }
402 
403  count = put_bits_count(&pb) & 0x1F;
404 
405  if (count)
406  put_bits(&pb, 32 - count, 0);
407 
408  flush_put_bits(&pb);
409 
410  return put_bytes_output(&pb);
411 }
412 
413 static int magy_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
414  const AVFrame *frame, int *got_packet)
415 {
416  MagicYUVContext *s = avctx->priv_data;
417  PutByteContext pb;
418  const int width = avctx->width, height = avctx->height;
419  int pos, slice, i, j, ret = 0;
420 
421  ret = ff_alloc_packet(avctx, pkt, (256 + 4 * s->nb_slices + width * height) *
422  s->planes + 256);
423  if (ret < 0)
424  return ret;
425 
426  bytestream2_init_writer(&pb, pkt->data, pkt->size);
427  bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
428  bytestream2_put_le32(&pb, 32); // header size
429  bytestream2_put_byte(&pb, 7); // version
430  bytestream2_put_byte(&pb, s->format);
431  bytestream2_put_byte(&pb, 12); // max huffman length
432  bytestream2_put_byte(&pb, 0);
433 
434  bytestream2_put_byte(&pb, 0);
435  bytestream2_put_byte(&pb, 0);
436  bytestream2_put_byte(&pb, 32); // coder type
437  bytestream2_put_byte(&pb, 0);
438 
439  bytestream2_put_le32(&pb, avctx->width);
440  bytestream2_put_le32(&pb, avctx->height);
441  bytestream2_put_le32(&pb, avctx->width);
442  bytestream2_put_le32(&pb, avctx->height);
443  bytestream2_put_le32(&pb, 0);
444 
445  for (i = 0; i < s->planes; i++) {
446  bytestream2_put_le32(&pb, 0);
447  for (j = 1; j < s->nb_slices; j++) {
448  bytestream2_put_le32(&pb, 0);
449  }
450  }
451 
452  bytestream2_put_byte(&pb, s->planes);
453 
454  for (i = 0; i < s->planes; i++) {
455  for (slice = 0; slice < s->nb_slices; slice++) {
456  bytestream2_put_byte(&pb, i);
457  }
458  }
459 
460  if (s->correlate) {
461  uint8_t *decorrelated[2] = { s->decorrelate_buf[0],
462  s->decorrelate_buf[1] };
463  const int decorrelate_linesize = FFALIGN(width, 16);
464  const uint8_t *const data[4] = { decorrelated[0], frame->data[0],
465  decorrelated[1], frame->data[3] };
466  const uint8_t *r, *g, *b;
467  const int linesize[4] = { decorrelate_linesize, frame->linesize[0],
468  decorrelate_linesize, frame->linesize[3] };
469 
470  g = frame->data[0];
471  b = frame->data[1];
472  r = frame->data[2];
473 
474  for (i = 0; i < height; i++) {
475  s->llvidencdsp.diff_bytes(decorrelated[0], b, g, width);
476  s->llvidencdsp.diff_bytes(decorrelated[1], r, g, width);
477  g += frame->linesize[0];
478  b += frame->linesize[1];
479  r += frame->linesize[2];
480  decorrelated[0] += decorrelate_linesize;
481  decorrelated[1] += decorrelate_linesize;
482  }
483 
484  for (i = 0; i < s->planes; i++) {
485  for (slice = 0; slice < s->nb_slices; slice++) {
486  s->predict(s, data[i], s->slices[i], linesize[i],
487  frame->width, frame->height);
488  }
489  }
490  } else {
491  for (i = 0; i < s->planes; i++) {
492  for (slice = 0; slice < s->nb_slices; slice++) {
493  s->predict(s, frame->data[i], s->slices[i], frame->linesize[i],
494  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
495  AV_CEIL_RSHIFT(frame->height, s->vshift[i]));
496  }
497  }
498  }
499 
500  init_put_bits(&s->pb, pkt->data + bytestream2_tell_p(&pb), bytestream2_get_bytes_left_p(&pb));
501 
502  for (i = 0; i < s->planes; i++) {
503  encode_table(avctx, s->slices[i],
504  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
505  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
506  &s->pb, s->he[i]);
507  }
508  s->tables_size = put_bytes_count(&s->pb, 1);
509  bytestream2_skip_p(&pb, s->tables_size);
510 
511  for (i = 0; i < s->planes; i++) {
512  unsigned slice_size;
513 
514  s->slice_pos[i] = bytestream2_tell_p(&pb);
515  slice_size = encode_slice(s->slices[i], pkt->data + bytestream2_tell_p(&pb),
516  bytestream2_get_bytes_left_p(&pb),
517  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
518  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
519  s->he[i], s->frame_pred);
520  bytestream2_skip_p(&pb, slice_size);
521  }
522 
523  pos = bytestream2_tell_p(&pb);
524  bytestream2_seek_p(&pb, 32, SEEK_SET);
525  bytestream2_put_le32(&pb, s->slice_pos[0] - 32);
526  for (i = 0; i < s->planes; i++) {
527  bytestream2_put_le32(&pb, s->slice_pos[i] - 32);
528  }
529  bytestream2_seek_p(&pb, pos, SEEK_SET);
530 
531  pkt->size = bytestream2_tell_p(&pb);
532 
533  *got_packet = 1;
534 
535  return 0;
536 }
537 
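A minimal sketch of driving this encoder through the public libavcodec API, assuming an FFmpeg build with the encoder enabled ("magicyuv" and the "pred" option are the names registered below in this file; error handling and cleanup are abbreviated):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int encode_one_gray_frame(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("magicyuv");
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVFrame  *frame      = av_frame_alloc();
    AVPacket *pkt        = av_packet_alloc();
    int ret;

    if (!codec || !ctx || !frame || !pkt)
        return AVERROR(ENOMEM);

    ctx->width     = 320;
    ctx->height    = 240;
    ctx->time_base = (AVRational){ 1, 25 };
    ctx->pix_fmt   = AV_PIX_FMT_GRAY8;
    av_opt_set(ctx->priv_data, "pred", "median", 0);

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        return ret;

    frame->format = ctx->pix_fmt;
    frame->width  = ctx->width;
    frame->height = ctx->height;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    memset(frame->data[0], 128, frame->linesize[0] * frame->height);

    if ((ret = avcodec_send_frame(ctx, frame)) < 0)
        return ret;
    ret = avcodec_receive_packet(ctx, pkt);  /* pkt now holds one MAGY frame */

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return ret < 0 ? ret : 0;
}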
538 static av_cold int magy_encode_close(AVCodecContext *avctx)
539 {
540  MagicYUVContext *s = avctx->priv_data;
541  int i;
542 
543  for (i = 0; i < s->planes; i++)
544  av_freep(&s->slices[i]);
545  av_freep(&s->decorrelate_buf);
546 
547  return 0;
548 }
549 
550 #define OFFSET(x) offsetof(MagicYUVContext, x)
551 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
552 static const AVOption options[] = {
553  { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, {.i64=LEFT}, LEFT, MEDIAN, VE, "pred" },
554  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, 0, 0, VE, "pred" },
555  { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = GRADIENT }, 0, 0, VE, "pred" },
556  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, 0, 0, VE, "pred" },
557  { NULL},
558 };
559 
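The "pred" option above maps straight onto the Prediction enum, so the predictor can be chosen from the command line. An illustrative invocation, assuming an ffmpeg binary built with this encoder:

ffmpeg -i input.mov -c:v magicyuv -pred median -pix_fmt yuv422p output.nut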
560 static const AVClass magicyuv_class = {
561  .class_name = "magicyuv",
562  .item_name = av_default_item_name,
563  .option = options,
564  .version = LIBAVUTIL_VERSION_INT,
565 };
566 
567 const FFCodec ff_magicyuv_encoder = {
568  .p.name = "magicyuv",
569  CODEC_LONG_NAME("MagicYUV video"),
570  .p.type = AVMEDIA_TYPE_VIDEO,
571  .p.id = AV_CODEC_ID_MAGICYUV,
572  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
573  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
574  .priv_data_size = sizeof(MagicYUVContext),
575  .p.priv_class = &magicyuv_class,
576  .init = magy_encode_init,
577  .close = magy_encode_close,
578  FF_CODEC_ENCODE_CB(magy_encode_frame),
579  .p.pix_fmts = (const enum AVPixelFormat[]) {
580  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
581  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
582  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
583  },
584  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
585 };