magicyuvenc.c
/*
 * MagicYUV encoder
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"

#include "avcodec.h"
#include "bytestream.h"
#include "put_bits.h"
#include "internal.h"
#include "thread.h"
#include "lossless_videoencdsp.h"

typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;

typedef struct HuffEntry {
    uint8_t  len;
    uint32_t code;
} HuffEntry;

typedef struct PTable {
    int     value;  ///< input value
    int64_t prob;   ///< number of occurrences of this value in input
} PTable;

typedef struct MagicYUVContext {
    const AVClass       *class;
    int                  frame_pred;
    PutBitContext        pb;
    int                  planes;
    uint8_t              format;
    AVFrame             *p;
    int                  slice_height;
    int                  nb_slices;
    int                  correlate;
    int                  hshift[4];
    int                  vshift[4];
    uint8_t             *slices[4];
    unsigned             slice_pos[4];
    unsigned             tables_size;
    HuffEntry            he[4][256];
    LLVidEncDSPContext   llvidencdsp;
    void (*predict)(struct MagicYUVContext *s, uint8_t *src, uint8_t *dst,
                    ptrdiff_t stride, int width, int height);
} MagicYUVContext;

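/* Left prediction: every output byte is the difference to the previous
 * sample; the first sample of each row after the first is predicted from
 * the sample directly above it. Residuals are written contiguously
 * (width-pitched) into dst. */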
static void left_predict(MagicYUVContext *s,
                         uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                         int width, int height)
{
    uint8_t prev = 0;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - prev;
        prev   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        prev = src[-stride];
        for (i = 0; i < width; i++) {
            dst[i] = src[i] - prev;
            prev   = src[i];
        }
        dst += width;
        src += stride;
    }
}

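/* Gradient prediction: the first row falls back to left prediction, the
 * first column of later rows is predicted from the sample above, and every
 * other sample is coded as src - (left + top - topleft). */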
static void gradient_predict(MagicYUVContext *s,
                             uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                             int width, int height)
{
    int left = 0, top, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        top = src[-stride];
        left = src[0] - top;
        dst[0] = left;
        for (i = 1; i < width; i++) {
            top     = src[i - stride];
            lefttop = src[i - (stride + 1)];
            left    = src[i - 1];
            dst[i]  = (src[i] - top) - left + lefttop;
        }
        dst += width;
        src += stride;
    }
}

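/* Median prediction: the first row uses left prediction, subsequent rows
 * use the DSP sub_median_pred(), i.e. the residual against the median of
 * left, top and left + top - topleft. */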
static void median_predict(MagicYUVContext *s,
                           uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                           int width, int height)
{
    int left = 0, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        left = lefttop = src[-stride];
        s->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &left, &lefttop);
        dst += width;
        src += stride;
    }
}

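/* Encoder init: map the pixel format to the MagicYUV FOURCC and format
 * byte (enabling RGB decorrelation for GBR(A) input and setting the chroma
 * shifts for subsampled YUV), allocate one scratch buffer per plane for
 * the prediction residuals, pick the predictor selected by the "pred"
 * option, and write the 32-byte MAGY header into the extradata. */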
static av_cold int magy_encode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    int i;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'G');
        s->correlate = 1;
        s->format = 0x65;
        break;
    case AV_PIX_FMT_GBRAP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'A');
        s->correlate = 1;
        s->format = 0x66;
        break;
    case AV_PIX_FMT_YUV420P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '0');
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        s->format = 0x69;
        break;
    case AV_PIX_FMT_YUV422P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '2');
        s->hshift[1] =
        s->hshift[2] = 1;
        s->format = 0x68;
        break;
    case AV_PIX_FMT_YUV444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '4');
        s->format = 0x67;
        break;
    case AV_PIX_FMT_YUVA444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', 'A');
        s->format = 0x6a;
        break;
    case AV_PIX_FMT_GRAY8:
        avctx->codec_tag = MKTAG('M', '8', 'G', '0');
        s->format = 0x6b;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n",
               avctx->pix_fmt);
        return AVERROR_INVALIDDATA;
    }

    ff_llvidencdsp_init(&s->llvidencdsp);

    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    s->nb_slices = 1;

    for (i = 0; i < s->planes; i++) {
        s->slices[i] = av_malloc(avctx->width * (avctx->height + 2) +
                                 AV_INPUT_BUFFER_PADDING_SIZE);
        if (!s->slices[i]) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer.\n");
            return AVERROR(ENOMEM);
        }
    }

    switch (s->frame_pred) {
    case LEFT:     s->predict = left_predict;     break;
    case GRADIENT: s->predict = gradient_predict; break;
    case MEDIAN:   s->predict = median_predict;   break;
    }

    avctx->extradata_size = 32;

    avctx->extradata = av_mallocz(avctx->extradata_size +
                                  AV_INPUT_BUFFER_PADDING_SIZE);

    if (!avctx->extradata) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
        return AVERROR(ENOMEM);
    }

    bytestream2_init_writer(&pb, avctx->extradata, avctx->extradata_size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32);
    bytestream2_put_byte(&pb, 7);
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12);
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32);
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);

    return 0;
}

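/* Turn the per-symbol code lengths into canonical Huffman codes: the first
 * pass derives the starting code value for each bit length (32 down to 1),
 * the second pass hands out consecutive codes to the symbols of each
 * length in symbol order. */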
static void calculate_codes(HuffEntry *he, uint16_t codes_count[33])
{
    for (unsigned i = 32, nb_codes = 0; i > 0; i--) {
        uint16_t curr = codes_count[i];   // # of leaves of length i
        codes_count[i] = nb_codes / 2;    // # of non-leaf nodes on level i
        nb_codes = codes_count[i] + curr; // # of nodes on level i
    }

    for (unsigned i = 0; i < 256; i++) {
        he[i].code = codes_count[he[i].len];
        codes_count[he[i].len]++;
    }
}

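/* Histogram of the width * height residual bytes of one (width-pitched)
 * plane buffer. */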
static void count_usage(uint8_t *src, int width,
                        int height, PTable *counts)
{
    int i, j;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            counts[src[i]].prob++;
        }
        src += width;
    }
}

typedef struct PackageMergerList {
    int nitems;             ///< number of items in the list and probability      ex. 4
    int item_idx[515];      ///< index range for each item in items                   0, 2, 5, 9, 13
    int probability[514];   ///< probability of each item                             3, 8, 18, 46
    int items[257 * 16];    ///< chain of all individual values that make up items    A, B, A, B, C, A, B, C, D, C, D, D, E
} PackageMergerList;

static int compare_by_prob(const void *a, const void *b)
{
    const PTable *a2 = a;
    const PTable *b2 = b;
    return a2->prob - b2->prob;
}

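/* Derive length-limited Huffman code lengths using the package-merge
 * algorithm: the symbols are sorted by probability, lists of the cheapest
 * symbols and merged pairs are built level by level, and the number of
 * times each symbol appears in the final list becomes its code length
 * (capped at max_length, 12 in this encoder). The per-length counts are
 * accumulated in codes_counts[] for calculate_codes(). */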
static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts,
                                      uint16_t codes_counts[33],
                                      int size, int max_length)
{
    PackageMergerList list_a, list_b, *to = &list_a, *from = &list_b, *temp;
    int times, i, j, k;
    int nbits[257] = {0};
    int min;

    av_assert0(max_length > 0);

    to->nitems = 0;
    from->nitems = 0;
    to->item_idx[0] = 0;
    from->item_idx[0] = 0;
    AV_QSORT(prob_table, size, PTable, compare_by_prob);

    for (times = 0; times <= max_length; times++) {
        to->nitems = 0;
        to->item_idx[0] = 0;

        j = 0;
        k = 0;

        if (times < max_length) {
            i = 0;
        }
        while (i < size || j + 1 < from->nitems) {
            to->nitems++;
            to->item_idx[to->nitems] = to->item_idx[to->nitems - 1];
            if (i < size &&
                (j + 1 >= from->nitems ||
                 prob_table[i].prob <
                     from->probability[j] + from->probability[j + 1])) {
                to->items[to->item_idx[to->nitems]++] = prob_table[i].value;
                to->probability[to->nitems - 1] = prob_table[i].prob;
                i++;
            } else {
                for (k = from->item_idx[j]; k < from->item_idx[j + 2]; k++) {
                    to->items[to->item_idx[to->nitems]++] = from->items[k];
                }
                to->probability[to->nitems - 1] =
                    from->probability[j] + from->probability[j + 1];
                j += 2;
            }
        }
        temp = to;
        to = from;
        from = temp;
    }

    min = (size - 1 < from->nitems) ? size - 1 : from->nitems;
    for (i = 0; i < from->item_idx[min]; i++) {
        nbits[from->items[i]]++;
    }

    for (i = 0; i < size; i++) {
        distincts[i].len = nbits[i];
        codes_counts[nbits[i]]++;
    }
}

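/* Build the Huffman table for one plane: count symbol usage (every count
 * is bumped by one so all 256 symbols get a code), derive length-limited
 * code lengths and canonical codes, and write the table to the bitstream
 * as 256 entries of a zero flag bit followed by the 7-bit code length. */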
static int encode_table(AVCodecContext *avctx, uint8_t *dst,
                        int width, int height,
                        PutBitContext *pb, HuffEntry *he)
{
    PTable counts[256] = { {0} };
    uint16_t codes_counts[33] = { 0 };
    int i;

    count_usage(dst, width, height, counts);

    for (i = 0; i < 256; i++) {
        counts[i].prob++;
        counts[i].value = i;
    }

    magy_huffman_compute_bits(counts, he, codes_counts, 256, 12);

    calculate_codes(he, codes_counts);

    for (i = 0; i < 256; i++) {
        put_bits(pb, 1, 0);
        put_bits(pb, 7, he[i].len);
    }

    return 0;
}

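/* Encode one slice: write a zero byte and the prediction mode, then the
 * Huffman-coded residuals of the plane, zero-padded to a 32-bit boundary.
 * Returns the slice size in bytes. */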
static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size,
                        int width, int height, HuffEntry *he, int prediction)
{
    PutBitContext pb;
    int i, j;
    int count;

    init_put_bits(&pb, dst, dst_size);

    put_bits(&pb, 8, 0);
    put_bits(&pb, 8, prediction);

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            const int idx = src[i];
            put_bits(&pb, he[idx].len, he[idx].code);
        }

        src += width;
    }

    count = put_bits_count(&pb) & 0x1F;

    if (count)
        put_bits(&pb, 32 - count, 0);

    count = put_bits_count(&pb);

    flush_put_bits(&pb);

    return count >> 3;
}

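/* Per-frame entry point: allocate the packet, write the MAGY frame header
 * with placeholder slice offsets, optionally decorrelate the R and B
 * planes against G (swapping the first two planes to match the bitstream
 * plane order), run the selected predictor into the per-plane scratch
 * buffers, emit the Huffman tables and the coded slices, then patch the
 * slice offset table and flag the packet as a keyframe. */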
static int magy_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *frame, int *got_packet)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    const int width = avctx->width, height = avctx->height;
    int pos, slice, i, j, ret = 0;

    ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * s->nb_slices + width * height) *
                           s->planes + 256, 0);
    if (ret < 0)
        return ret;

    bytestream2_init_writer(&pb, pkt->data, pkt->size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32); // header size
    bytestream2_put_byte(&pb, 7); // version
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12); // max huffman length
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32); // coder type
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, 0);

    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, 0);
        for (j = 1; j < s->nb_slices; j++) {
            bytestream2_put_le32(&pb, 0);
        }
    }

    bytestream2_put_byte(&pb, s->planes);

    for (i = 0; i < s->planes; i++) {
        for (slice = 0; slice < s->nb_slices; slice++) {
            bytestream2_put_byte(&pb, i);
        }
    }

    if (s->correlate) {
        uint8_t *r, *g, *b;
        AVFrame *p = av_frame_clone(frame);

        g = p->data[0];
        b = p->data[1];
        r = p->data[2];

        for (i = 0; i < height; i++) {
            s->llvidencdsp.diff_bytes(b, b, g, width);
            s->llvidencdsp.diff_bytes(r, r, g, width);
            g += p->linesize[0];
            b += p->linesize[1];
            r += p->linesize[2];
        }

        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);

        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, p->data[i], s->slices[i], p->linesize[i],
                           p->width, p->height);
            }
        }

        av_frame_free(&p);
    } else {
        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, frame->data[i], s->slices[i], frame->linesize[i],
                           AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                           AV_CEIL_RSHIFT(frame->height, s->vshift[i]));
            }
        }
    }

    init_put_bits(&s->pb, pkt->data + bytestream2_tell_p(&pb), bytestream2_get_bytes_left_p(&pb));

    for (i = 0; i < s->planes; i++) {
        encode_table(avctx, s->slices[i],
                     AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                     AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                     &s->pb, s->he[i]);
    }
    s->tables_size = (put_bits_count(&s->pb) + 7) >> 3;
    bytestream2_skip_p(&pb, s->tables_size);

    for (i = 0; i < s->planes; i++) {
        unsigned slice_size;

        s->slice_pos[i] = bytestream2_tell_p(&pb);
        slice_size = encode_slice(s->slices[i], pkt->data + bytestream2_tell_p(&pb),
                                  bytestream2_get_bytes_left_p(&pb),
                                  AV_CEIL_RSHIFT(frame->width,  s->hshift[i]),
                                  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                                  s->he[i], s->frame_pred);
        bytestream2_skip_p(&pb, slice_size);
    }

    pos = bytestream2_tell_p(&pb);
    bytestream2_seek_p(&pb, 32, SEEK_SET);
    bytestream2_put_le32(&pb, s->slice_pos[0] - 32);
    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, s->slice_pos[i] - 32);
    }
    bytestream2_seek_p(&pb, pos, SEEK_SET);

    pkt->size   = bytestream2_tell_p(&pb);
    pkt->flags |= AV_PKT_FLAG_KEY;

    *got_packet = 1;

    return 0;
}

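/* Free the per-plane scratch buffers allocated in magy_encode_init(). */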
static av_cold int magy_encode_close(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->planes; i++)
        av_freep(&s->slices[i]);

    return 0;
}

#define OFFSET(x) offsetof(MagicYUVContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, {.i64=LEFT}, LEFT, MEDIAN, VE, "pred" },
    { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },     0, 0, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = GRADIENT }, 0, 0, VE, "pred" },
    { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN },   0, 0, VE, "pred" },
    { NULL},
};

static const AVClass magicyuv_class = {
    .class_name = "magicyuv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_magicyuv_encoder = {
    .name             = "magicyuv",
    .long_name        = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .priv_class       = &magicyuv_class,
    .init             = magy_encode_init,
    .close            = magy_encode_close,
    .encode2          = magy_encode_frame,
    .capabilities     = AV_CODEC_CAP_FRAME_THREADS,
    .pix_fmts         = (const enum AVPixelFormat[]) {
                          AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
                          AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
                          AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
                      },
    .caps_internal    = FF_CODEC_CAP_INIT_CLEANUP,
};
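
/*
 * Usage sketch (illustrative, not part of the upstream file): the encoder
 * is selected by the name "magicyuv" and the prediction mode is chosen via
 * the private "pred" option declared above, e.g.
 *
 *     ffmpeg -i input.mov -c:v magicyuv -pred median output.avi
 *
 * or, through the libavcodec API, with
 *     av_opt_set(avctx->priv_data, "pred", "median", 0);
 * before calling avcodec_open2().
 */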