FFmpeg
magicyuvenc.c
/*
 * MagicYUV encoder
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"

#include "avcodec.h"
#include "bytestream.h"
#include "put_bits.h"
#include "internal.h"
#include "thread.h"
#include "lossless_videoencdsp.h"

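/*
 * Prediction modes selectable through the "pred" option: LEFT subtracts the
 * previous pixel, GRADIENT subtracts a left + top - topleft gradient, and
 * MEDIAN uses the HuffYUV-style median predictor. magy_encode_init() maps
 * each mode to one of the *_predict() helpers below.
 */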
typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;

typedef struct HuffEntry {
    uint8_t  sym;
    uint8_t  len;
    uint32_t code;
} HuffEntry;

typedef struct PTable {
    int     value;  ///< input value
    int64_t prob;   ///< number of occurrences of this value in input
} PTable;

typedef struct MagicYUVContext {
    const AVClass       *class;
    int                  frame_pred;
    PutBitContext        pb;
    int                  planes;
    uint8_t              format;
    AVFrame             *p;
    int                  slice_height;
    int                  nb_slices;
    int                  correlate;
    int                  hshift[4];
    int                  vshift[4];
    uint8_t             *slices[4];
    unsigned             slice_pos[4];
    unsigned             tables_size;
    HuffEntry            he[4][256];
    LLVidEncDSPContext   llvidencdsp;
    void (*predict)(struct MagicYUVContext *s, uint8_t *src, uint8_t *dst,
                    ptrdiff_t stride, int width, int height);
} MagicYUVContext;

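/*
 * Left prediction: every output byte is the difference between the current
 * pixel and the previous pixel in raster order; the first pixel of each row
 * after the first is predicted from the pixel directly above it
 * (src[-stride]). Residuals are packed into dst with a pitch of 'width'.
 */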
static void left_predict(MagicYUVContext *s,
                         uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                         int width, int height)
{
    uint8_t prev = 0;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - prev;
        prev   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        prev = src[-stride];
        for (i = 0; i < width; i++) {
            dst[i] = src[i] - prev;
            prev   = src[i];
        }
        dst += width;
        src += stride;
    }
}

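/*
 * Gradient prediction: the first row is left-predicted; later rows use the
 * LOCO-I style gradient predictor, storing src[i] - (top + left - topleft).
 * The first pixel of each row only uses the pixel above it as predictor.
 */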
static void gradient_predict(MagicYUVContext *s,
                             uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                             int width, int height)
{
    int left = 0, top, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        top  = src[-stride];
        left = src[0] - top;
        dst[0] = left;
        for (i = 1; i < width; i++) {
            top     = src[i - stride];
            lefttop = src[i - (stride + 1)];
            left    = src[i - 1];
            dst[i]  = (src[i] - top) - left + lefttop;
        }
        dst += width;
        src += stride;
    }
}

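/*
 * Median prediction: the first row is left-predicted; remaining rows are
 * handled by the llvidencdsp sub_median_pred() routine, which subtracts the
 * median of (left, top, left + top - topleft) from each pixel.
 */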
static void median_predict(MagicYUVContext *s,
                           uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                           int width, int height)
{
    int left = 0, lefttop;
    int i, j;

    for (i = 0; i < width; i++) {
        dst[i] = src[i] - left;
        left   = src[i];
    }
    dst += width;
    src += stride;
    for (j = 1; j < height; j++) {
        left = lefttop = src[-stride];
        s->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &left, &lefttop);
        dst += width;
        src += stride;
    }
}

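/*
 * Encoder init: pick the MagicYUV FourCC and format byte from the pixel
 * format, record the chroma subsampling shifts, allocate one residual buffer
 * per plane, select the predictor, and prewrite the 32-byte 'MAGY' header
 * into the extradata.
 */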
static av_cold int magy_encode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    int i;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'G');
        s->correlate = 1;
        s->format = 0x65;
        break;
    case AV_PIX_FMT_GBRAP:
        avctx->codec_tag = MKTAG('M', '8', 'R', 'A');
        s->correlate = 1;
        s->format = 0x66;
        break;
    case AV_PIX_FMT_YUV420P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '0');
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        s->format = 0x69;
        break;
    case AV_PIX_FMT_YUV422P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '2');
        s->hshift[1] =
        s->hshift[2] = 1;
        s->format = 0x68;
        break;
    case AV_PIX_FMT_YUV444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', '4');
        s->format = 0x67;
        break;
    case AV_PIX_FMT_YUVA444P:
        avctx->codec_tag = MKTAG('M', '8', 'Y', 'A');
        s->format = 0x6a;
        break;
    case AV_PIX_FMT_GRAY8:
        avctx->codec_tag = MKTAG('M', '8', 'G', '0');
        s->format = 0x6b;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n",
               avctx->pix_fmt);
        return AVERROR_INVALIDDATA;
    }

    ff_llvidencdsp_init(&s->llvidencdsp);

    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    s->nb_slices = 1;

    for (i = 0; i < s->planes; i++) {
        s->slices[i] = av_malloc(avctx->width * (avctx->height + 2) +
                                 AV_INPUT_BUFFER_PADDING_SIZE);
        if (!s->slices[i]) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer.\n");
            return AVERROR(ENOMEM);
        }
    }

    switch (s->frame_pred) {
    case LEFT:     s->predict = left_predict;     break;
    case GRADIENT: s->predict = gradient_predict; break;
    case MEDIAN:   s->predict = median_predict;   break;
    }

    avctx->extradata_size = 32;

    avctx->extradata = av_mallocz(avctx->extradata_size +
                                  AV_INPUT_BUFFER_PADDING_SIZE);

    if (!avctx->extradata) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
        return AVERROR(ENOMEM);
    }

    bytestream2_init_writer(&pb, avctx->extradata, avctx->extradata_size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32); // header size
    bytestream2_put_byte(&pb, 7);  // version
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12); // max huffman length
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32); // coder type
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);

    return 0;
}

static int magy_huff_cmp_len(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return (aa->len - bb->len) * 256 + aa->sym - bb->sym;
}

static int huff_cmp_sym(const void *a, const void *b)
{
    const HuffEntry *aa = a, *bb = b;
    return bb->sym - aa->sym;
}

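/*
 * Turn code lengths into canonical Huffman codes: sort by length (ties by
 * symbol), walk from the longest entries to the shortest while accumulating
 * an MSB-aligned counter, then reorder by symbol so encode_slice() can index
 * the table directly with the input byte. With lengths {1, 2, 3, 3} this
 * assignment yields the codes 1, 01, 001 and 000.
 */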
static void calculate_codes(HuffEntry *he)
{
    uint32_t code;
    int i;

    AV_QSORT(he, 256, HuffEntry, magy_huff_cmp_len);

    code = 1;
    for (i = 255; i >= 0; i--) {
        he[i].code  = code >> (32 - he[i].len);
        code       += 0x80000000u >> (he[i].len - 1);
    }

    AV_QSORT(he, 256, HuffEntry, huff_cmp_sym);
}

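/* Histogram of the byte values in one residual plane; the planes produced by
 * the predictors are packed with a pitch equal to width. */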
static void count_usage(uint8_t *src, int width,
                        int height, PTable *counts)
{
    int i, j;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            counts[src[i]].prob++;
        }
        src += width;
    }
}

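/*
 * The Huffman code lengths are limited (to 12 bits here) with the
 * package-merge algorithm: single symbols and merged "packages" are
 * repeatedly combined into a new list, and a symbol's final code length is
 * the number of lists it ends up in.
 */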
typedef struct PackageMergerList {
    int nitems;            ///< number of items in the list and probability      ex. 4
    int item_idx[515];     ///< index range for each item in items                   0, 2, 5, 9, 13
    int probability[514];  ///< probability of each item                             3, 8, 18, 46
    int items[257 * 16];   ///< chain of all individual values that make up items    A, B, A, B, C, A, B, C, D, C, D, D, E
} PackageMergerList;

static int compare_by_prob(const void *a, const void *b)
{
    PTable a_val = *(PTable *)a;
    PTable b_val = *(PTable *)b;
    return a_val.prob - b_val.prob;
}

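/*
 * Package-merge core: the symbols are sorted by probability and the working
 * list is rebuilt once per allowed bit of code length, each pass taking the
 * cheaper of the next single symbol or the next pair ("package") from the
 * previous list. A symbol's final code length is the number of times it
 * appears in the resulting list (nbits[]), which is written into the
 * HuffEntry array.
 */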
static void magy_huffman_compute_bits(PTable *prob_table, HuffEntry *distincts,
                                      int size, int max_length)
{
    PackageMergerList list_a, list_b, *to = &list_a, *from = &list_b, *temp;
    int times, i, j, k;
    int nbits[257] = {0};
    int min;

    av_assert0(max_length > 0);

    to->nitems = 0;
    from->nitems = 0;
    to->item_idx[0] = 0;
    from->item_idx[0] = 0;
    AV_QSORT(prob_table, size, PTable, compare_by_prob);

    for (times = 0; times <= max_length; times++) {
        to->nitems = 0;
        to->item_idx[0] = 0;

        j = 0;
        k = 0;

        if (times < max_length) {
            i = 0;
        }
        while (i < size || j + 1 < from->nitems) {
            to->nitems++;
            to->item_idx[to->nitems] = to->item_idx[to->nitems - 1];
            if (i < size &&
                (j + 1 >= from->nitems ||
                 prob_table[i].prob <
                     from->probability[j] + from->probability[j + 1])) {
                to->items[to->item_idx[to->nitems]++] = prob_table[i].value;
                to->probability[to->nitems - 1] = prob_table[i].prob;
                i++;
            } else {
                for (k = from->item_idx[j]; k < from->item_idx[j + 2]; k++) {
                    to->items[to->item_idx[to->nitems]++] = from->items[k];
                }
                to->probability[to->nitems - 1] =
                    from->probability[j] + from->probability[j + 1];
                j += 2;
            }
        }
        temp = to;
        to = from;
        from = temp;
    }

    min = (size - 1 < from->nitems) ? size - 1 : from->nitems;
    for (i = 0; i < from->item_idx[min]; i++) {
        nbits[from->items[i]]++;
    }

    for (i = 0; i < size; i++) {
        distincts[i].sym = i;
        distincts[i].len = nbits[i];
    }
}

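/*
 * Compute and emit the Huffman table for one plane: build the symbol
 * histogram, add one to every count so all 256 symbols receive a code,
 * limit the code lengths to 12 bits, derive canonical codes, and write the
 * 256 lengths to the bitstream, each as a 0 bit followed by a 7-bit length.
 */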
static int encode_table(AVCodecContext *avctx, uint8_t *dst,
                        int width, int height,
                        PutBitContext *pb, HuffEntry *he)
{
    PTable counts[256] = { {0} };
    int i;

    count_usage(dst, width, height, counts);

    for (i = 0; i < 256; i++) {
        counts[i].prob++;
        counts[i].value = 255 - i;
    }

    magy_huffman_compute_bits(counts, he, 256, 12);

    calculate_codes(he);

    for (i = 0; i < 256; i++) {
        put_bits(pb, 1, 0);
        put_bits(pb, 7, he[i].len);
    }

    return 0;
}

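/*
 * Huffman-code one plane of residuals into dst: a two-byte slice header
 * (a zero byte plus the prediction mode) is written first, then every
 * residual byte is emitted with its table code, the stream is zero-padded
 * to a 32-bit boundary, and the slice size in bytes is returned.
 */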
static int encode_slice(uint8_t *src, uint8_t *dst, int dst_size,
                        int width, int height, HuffEntry *he, int prediction)
{
    PutBitContext pb;
    int i, j;
    int count;

    init_put_bits(&pb, dst, dst_size);

    put_bits(&pb, 8, 0);
    put_bits(&pb, 8, prediction);

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            const int idx = src[i];
            put_bits(&pb, he[idx].len, he[idx].code);
        }

        src += width;
    }

    count = put_bits_count(&pb) & 0x1F;

    if (count)
        put_bits(&pb, 32 - count, 0);

    count = put_bits_count(&pb);

    flush_put_bits(&pb);

    return count >> 3;
}

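/*
 * Per-frame entry point: write the 32-byte 'MAGY' header and a placeholder
 * offset table, optionally decorrelate GBR(A) planes by subtracting G from
 * B and R, run the selected predictor on every plane, append the per-plane
 * Huffman tables and coded slices, then seek back and patch the real slice
 * offsets before setting the final packet size.
 */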
static int magy_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *frame, int *got_packet)
{
    MagicYUVContext *s = avctx->priv_data;
    PutByteContext pb;
    const int width = avctx->width, height = avctx->height;
    int pos, slice, i, j, ret = 0;

    ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * s->nb_slices + width * height) *
                           s->planes + 256, 0);
    if (ret < 0)
        return ret;

    bytestream2_init_writer(&pb, pkt->data, pkt->size);
    bytestream2_put_le32(&pb, MKTAG('M', 'A', 'G', 'Y'));
    bytestream2_put_le32(&pb, 32); // header size
    bytestream2_put_byte(&pb, 7);  // version
    bytestream2_put_byte(&pb, s->format);
    bytestream2_put_byte(&pb, 12); // max huffman length
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 0);
    bytestream2_put_byte(&pb, 32); // coder type
    bytestream2_put_byte(&pb, 0);

    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, avctx->width);
    bytestream2_put_le32(&pb, avctx->height);
    bytestream2_put_le32(&pb, 0);

    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, 0);
        for (j = 1; j < s->nb_slices; j++) {
            bytestream2_put_le32(&pb, 0);
        }
    }

    bytestream2_put_byte(&pb, s->planes);

    for (i = 0; i < s->planes; i++) {
        for (slice = 0; slice < s->nb_slices; slice++) {
            bytestream2_put_byte(&pb, i);
        }
    }

    if (s->correlate) {
        /* turn GBR(A) into pseudo-YUV by subtracting G from B and R */
        uint8_t *r, *g, *b;
        AVFrame *p = av_frame_clone(frame);

        g = p->data[0];
        b = p->data[1];
        r = p->data[2];

        for (i = 0; i < height; i++) {
            s->llvidencdsp.diff_bytes(b, b, g, width);
            s->llvidencdsp.diff_bytes(r, r, g, width);
            g += p->linesize[0];
            b += p->linesize[1];
            r += p->linesize[2];
        }

        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);

        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, p->data[i], s->slices[i], p->linesize[i],
                           p->width, p->height);
            }
        }

        av_frame_free(&p);
    } else {
        for (i = 0; i < s->planes; i++) {
            for (slice = 0; slice < s->nb_slices; slice++) {
                s->predict(s, frame->data[i], s->slices[i], frame->linesize[i],
                           AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
                           AV_CEIL_RSHIFT(frame->height, s->vshift[i]));
            }
        }
    }

    init_put_bits(&s->pb, pkt->data + bytestream2_tell_p(&pb),
                  bytestream2_get_bytes_left_p(&pb));

    for (i = 0; i < s->planes; i++) {
        encode_table(avctx, s->slices[i],
                     AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
                     AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                     &s->pb, s->he[i]);
    }
    s->tables_size = (put_bits_count(&s->pb) + 7) >> 3;
    bytestream2_skip_p(&pb, s->tables_size);

    for (i = 0; i < s->planes; i++) {
        unsigned slice_size;

        s->slice_pos[i] = bytestream2_tell_p(&pb);
        slice_size = encode_slice(s->slices[i], pkt->data + bytestream2_tell_p(&pb),
                                  bytestream2_get_bytes_left_p(&pb),
                                  AV_CEIL_RSHIFT(frame->width, s->hshift[i]),
                                  AV_CEIL_RSHIFT(frame->height, s->vshift[i]),
                                  s->he[i], s->frame_pred);
        bytestream2_skip_p(&pb, slice_size);
    }

    pos = bytestream2_tell_p(&pb);
    bytestream2_seek_p(&pb, 32, SEEK_SET);
    bytestream2_put_le32(&pb, s->slice_pos[0] - 32);
    for (i = 0; i < s->planes; i++) {
        bytestream2_put_le32(&pb, s->slice_pos[i] - 32);
    }
    bytestream2_seek_p(&pb, pos, SEEK_SET);

    pkt->size   = bytestream2_tell_p(&pb);
    pkt->flags |= AV_PKT_FLAG_KEY;

    *got_packet = 1;

    return 0;
}

static av_cold int magy_encode_close(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->planes; i++)
        av_freep(&s->slices[i]);

    return 0;
}

#define OFFSET(x) offsetof(MagicYUVContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, {.i64=LEFT}, LEFT, MEDIAN, VE, "pred" },
    { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },     0, 0, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = GRADIENT }, 0, 0, VE, "pred" },
    { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN },   0, 0, VE, "pred" },
    { NULL },
};

static const AVClass magicyuv_class = {
    .class_name = "magicyuv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_magicyuv_encoder = {
    .name             = "magicyuv",
    .long_name        = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .priv_class       = &magicyuv_class,
    .init             = magy_encode_init,
    .close            = magy_encode_close,
    .encode2          = magy_encode_frame,
    .capabilities     = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .pix_fmts         = (const enum AVPixelFormat[]) {
                            /* matches the formats handled in magy_encode_init() */
                            AV_PIX_FMT_GBRP,    AV_PIX_FMT_GBRAP,   AV_PIX_FMT_YUV422P,
                            AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
                            AV_PIX_FMT_GRAY8,   AV_PIX_FMT_NONE
                        },
};
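
/*
 * A minimal usage sketch, compiled out via #if 0 and not part of the encoder
 * itself: how an application might drive this encoder through the public
 * libavcodec API. It assumes a caller-provided AVFrame "frame" already
 * filled with AV_PIX_FMT_GBRP data of the configured size; the helper name
 * encode_one_magicyuv_frame() is purely illustrative.
 */
#if 0
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int encode_one_magicyuv_frame(AVFrame *frame, AVPacket *pkt,
                                     int width, int height)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MAGICYUV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->width     = width;
    ctx->height    = height;
    ctx->pix_fmt   = AV_PIX_FMT_GBRP;
    ctx->time_base = (AVRational){ 1, 25 };

    /* select the median predictor instead of the default left prediction */
    av_opt_set(ctx->priv_data, "pred", "median", 0);

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret < 0)
        goto end;

    ret = avcodec_send_frame(ctx, frame);
    if (ret >= 0)
        ret = avcodec_receive_packet(ctx, pkt);

end:
    avcodec_free_context(&ctx);
    return ret;
}
#endif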