FFmpeg
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "avcodec.h"
32 #include "encode.h"
33 #include "huffyuv.h"
34 #include "huffman.h"
35 #include "huffyuvencdsp.h"
36 #include "internal.h"
37 #include "lossless_videoencdsp.h"
38 #include "put_bits.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixdesc.h"
41 
42 static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
43  const uint8_t *src0, const uint8_t *src1, int w)
44 {
45  if (s->bps <= 8) {
46  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
47  } else {
48  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
49  }
50 }
51 
52 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
53  const uint8_t *src, int w, int left)
54 {
55  int i;
56  int min_width = FFMIN(w, 32);
57 
58  if (s->bps <= 8) {
59  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
60  const int temp = src[i];
61  dst[i] = temp - left;
62  left = temp;
63  }
64  if (w < 32)
65  return left;
66  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
67  return src[w-1];
68  } else {
69  const uint16_t *src16 = (const uint16_t *)src;
70  uint16_t *dst16 = ( uint16_t *)dst;
71  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
72  const int temp = src16[i];
73  dst16[i] = temp - left;
74  left = temp;
75  }
76  if (w < 32)
77  return left;
78  s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
79  return src16[w-1];
80  }
81 }
82 
83 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
84  const uint8_t *src, int w,
85  int *red, int *green, int *blue,
86  int *alpha)
87 {
88  int i;
89  int r, g, b, a;
90  int min_width = FFMIN(w, 8);
91  r = *red;
92  g = *green;
93  b = *blue;
94  a = *alpha;
95 
96  for (i = 0; i < min_width; i++) {
97  const int rt = src[i * 4 + R];
98  const int gt = src[i * 4 + G];
99  const int bt = src[i * 4 + B];
100  const int at = src[i * 4 + A];
101  dst[i * 4 + R] = rt - r;
102  dst[i * 4 + G] = gt - g;
103  dst[i * 4 + B] = bt - b;
104  dst[i * 4 + A] = at - a;
105  r = rt;
106  g = gt;
107  b = bt;
108  a = at;
109  }
110 
111  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
112 
113  *red = src[(w - 1) * 4 + R];
114  *green = src[(w - 1) * 4 + G];
115  *blue = src[(w - 1) * 4 + B];
116  *alpha = src[(w - 1) * 4 + A];
117 }
118 
119 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
120  uint8_t *src, int w,
121  int *red, int *green, int *blue)
122 {
123  int i;
124  int r, g, b;
125  r = *red;
126  g = *green;
127  b = *blue;
128  for (i = 0; i < FFMIN(w, 16); i++) {
129  const int rt = src[i * 3 + 0];
130  const int gt = src[i * 3 + 1];
131  const int bt = src[i * 3 + 2];
132  dst[i * 3 + 0] = rt - r;
133  dst[i * 3 + 1] = gt - g;
134  dst[i * 3 + 2] = bt - b;
135  r = rt;
136  g = gt;
137  b = bt;
138  }
139 
140  s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
141 
142  *red = src[(w - 1) * 3 + 0];
143  *green = src[(w - 1) * 3 + 1];
144  *blue = src[(w - 1) * 3 + 2];
145 }
146 
147 static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
148 {
149  if (s->bps <= 8) {
150  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
151  } else {
152  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
153  }
154 }
155 
156 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
157 {
158  int i;
159  int index = 0;
160  int n = s->vlc_n;
161 
162  for (i = 0; i < n;) {
163  int val = len[i];
164  int repeat = 0;
165 
166  for (; i < n && len[i] == val && repeat < 255; i++)
167  repeat++;
168 
169  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
170  if (repeat > 7) {
171  buf[index++] = val;
172  buf[index++] = repeat;
173  } else {
174  buf[index++] = val | (repeat << 5);
175  }
176  }
177 
178  return index;
179 }
180 
181 static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
182 {
183  int i, ret;
184  int size = 0;
185  int count = 3;
186 
187  if (s->version > 2)
188  count = 1 + s->alpha + 2*s->chroma;
189 
190  for (i = 0; i < count; i++) {
191  if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
192  return ret;
193 
194  if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
195  return -1;
196  }
197 
198  size += store_table(s, s->len[i], buf + size);
199  }
200  return size;
201 }
202 
204 {
205  HYuvContext *s = avctx->priv_data;
206  int i, j;
207  int ret;
209 
210  ff_huffyuv_common_init(avctx);
211  ff_huffyuvencdsp_init(&s->hencdsp, avctx);
212  ff_llvidencdsp_init(&s->llvidencdsp);
213 
214  avctx->extradata = av_mallocz(3*MAX_N + 4);
215  if (s->flags&AV_CODEC_FLAG_PASS1) {
216 #define STATS_OUT_SIZE 21*MAX_N*3 + 4
217  avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
218  if (!avctx->stats_out)
219  return AVERROR(ENOMEM);
220  }
221  s->version = 2;
222 
223  if (!avctx->extradata)
224  return AVERROR(ENOMEM);
225 
226  s->bps = desc->comp[0].depth;
227  s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
228  s->chroma = desc->nb_components > 2;
229  s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
231  &s->chroma_h_shift,
232  &s->chroma_v_shift);
233 
234  switch (avctx->pix_fmt) {
235  case AV_PIX_FMT_YUV420P:
236  case AV_PIX_FMT_YUV422P:
237  if (s->width & 1) {
238  av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
239  return AVERROR(EINVAL);
240  }
241  s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
242  break;
243  case AV_PIX_FMT_YUV444P:
244  case AV_PIX_FMT_YUV410P:
245  case AV_PIX_FMT_YUV411P:
246  case AV_PIX_FMT_YUV440P:
247  case AV_PIX_FMT_GBRP:
248  case AV_PIX_FMT_GBRP9:
249  case AV_PIX_FMT_GBRP10:
250  case AV_PIX_FMT_GBRP12:
251  case AV_PIX_FMT_GBRP14:
252  case AV_PIX_FMT_GBRP16:
253  case AV_PIX_FMT_GRAY8:
254  case AV_PIX_FMT_GRAY16:
255  case AV_PIX_FMT_YUVA444P:
256  case AV_PIX_FMT_YUVA420P:
257  case AV_PIX_FMT_YUVA422P:
258  case AV_PIX_FMT_GBRAP:
259  case AV_PIX_FMT_YUV420P9:
264  case AV_PIX_FMT_YUV422P9:
269  case AV_PIX_FMT_YUV444P9:
283  s->version = 3;
284  break;
285  case AV_PIX_FMT_RGB32:
286  s->bitstream_bpp = 32;
287  break;
288  case AV_PIX_FMT_RGB24:
289  s->bitstream_bpp = 24;
290  break;
291  default:
292  av_log(avctx, AV_LOG_ERROR, "format not supported\n");
293  return AVERROR(EINVAL);
294  }
295  s->n = 1<<s->bps;
296  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
297 
298  avctx->bits_per_coded_sample = s->bitstream_bpp;
299  s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
300  s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
301  if (s->context) {
302  if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
303  av_log(avctx, AV_LOG_ERROR,
304  "context=1 is not compatible with "
305  "2 pass huffyuv encoding\n");
306  return AVERROR(EINVAL);
307  }
308  }
309 
310  if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
311  if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
312  av_log(avctx, AV_LOG_ERROR,
313  "Error: YV12 is not supported by huffyuv; use "
314  "vcodec=ffvhuff or format=422p\n");
315  return AVERROR(EINVAL);
316  }
317  if (s->interlaced != ( s->height > 288 ))
318  av_log(avctx, AV_LOG_INFO,
319  "using huffyuv 2.2.0 or newer interlacing flag\n");
320  }
321 
322  if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
323  av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
324  "Use vstrict=-2 / -strict -2 to use it anyway.\n");
325  return AVERROR(EINVAL);
326  }
327 
328  if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
329  av_log(avctx, AV_LOG_ERROR,
330  "Error: RGB is incompatible with median predictor\n");
331  return AVERROR(EINVAL);
332  }
333 
334  ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
335  ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
336  if (s->context)
337  ((uint8_t*)avctx->extradata)[2] |= 0x40;
338  if (s->version < 3) {
339  ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
340  ((uint8_t*)avctx->extradata)[3] = 0;
341  } else {
342  ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
343  if (s->chroma)
344  ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
345  if (s->alpha)
346  ((uint8_t*)avctx->extradata)[2] |= 4;
347  ((uint8_t*)avctx->extradata)[3] = 1;
348  }
349  s->avctx->extradata_size = 4;
350 
351  if (avctx->stats_in) {
352  char *p = avctx->stats_in;
353 
354  for (i = 0; i < 4; i++)
355  for (j = 0; j < s->vlc_n; j++)
356  s->stats[i][j] = 1;
357 
358  for (;;) {
359  for (i = 0; i < 4; i++) {
360  char *next;
361 
362  for (j = 0; j < s->vlc_n; j++) {
363  s->stats[i][j] += strtol(p, &next, 0);
364  if (next == p) return -1;
365  p = next;
366  }
367  }
368  if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
369  }
370  } else {
371  for (i = 0; i < 4; i++)
372  for (j = 0; j < s->vlc_n; j++) {
373  int d = FFMIN(j, s->vlc_n - j);
374 
375  s->stats[i][j] = 100000000 / (d*d + 1);
376  }
377  }
378 
379  ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
380  if (ret < 0)
381  return ret;
382  s->avctx->extradata_size += ret;
383 
384  if (s->context) {
385  for (i = 0; i < 4; i++) {
386  int pels = s->width * s->height / (i ? 40 : 10);
387  for (j = 0; j < s->vlc_n; j++) {
388  int d = FFMIN(j, s->vlc_n - j);
389  s->stats[i][j] = pels/(d*d + 1);
390  }
391  }
392  } else {
393  for (i = 0; i < 4; i++)
394  for (j = 0; j < s->vlc_n; j++)
395  s->stats[i][j]= 0;
396  }
397 
398  if (ff_huffyuv_alloc_temp(s)) {
400  return AVERROR(ENOMEM);
401  }
402 
403  s->picture_number=0;
404 
405  return 0;
406 }
407 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
408 {
409  int i;
410  const uint8_t *y = s->temp[0] + offset;
411  const uint8_t *u = s->temp[1] + offset / 2;
412  const uint8_t *v = s->temp[2] + offset / 2;
413 
414  if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
415  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
416  return -1;
417  }
418 
419 #define LOAD4\
420  int y0 = y[2 * i];\
421  int y1 = y[2 * i + 1];\
422  int u0 = u[i];\
423  int v0 = v[i];
424 
425  count /= 2;
426 
427  if (s->flags & AV_CODEC_FLAG_PASS1) {
428  for(i = 0; i < count; i++) {
429  LOAD4;
430  s->stats[0][y0]++;
431  s->stats[1][u0]++;
432  s->stats[0][y1]++;
433  s->stats[2][v0]++;
434  }
435  }
436  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
437  return 0;
438  if (s->context) {
439  for (i = 0; i < count; i++) {
440  LOAD4;
441  s->stats[0][y0]++;
442  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
443  s->stats[1][u0]++;
444  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
445  s->stats[0][y1]++;
446  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
447  s->stats[2][v0]++;
448  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
449  }
450  } else {
451  for(i = 0; i < count; i++) {
452  LOAD4;
453  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
454  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
455  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
456  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
457  }
458  }
459  return 0;
460 }
461 
/* Entropy-code one plane row of "width" samples from s->temp / s->temp16.
 * Samples are processed in pairs; an odd trailing sample is handled by the
 * *END macro family.  Three depth paths: <=8 bit (byte samples), 9..14 bit
 * (masked 16-bit samples coded directly), >14 bit (top bits VLC-coded,
 * low 2 bits stored raw).  Returns 0, or -1 on output-buffer overflow. */
462 static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
463 {
464  int i, count = width/2;
465 
466  if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
467  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
468  return -1;
469  }
470 
/* Macros for the odd trailing sample (when width is odd). */
471 #define LOADEND\
472  int y0 = s->temp[0][width-1];
473 #define LOADEND_14\
474  int y0 = s->temp16[0][width-1] & mask;
475 #define LOADEND_16\
476  int y0 = s->temp16[0][width-1];
477 #define STATEND\
478  s->stats[plane][y0]++;
479 #define STATEND_16\
480  s->stats[plane][y0>>2]++;
481 #define WRITEEND\
482  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
483 #define WRITEEND_16\
484  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
485  put_bits(&s->pb, 2, y0&3);
486 
/* Macros for the main loop: load, count statistics, and emit a sample
 * pair.  The _16 variants VLC-code y>>2 and append the low 2 bits raw. */
487 #define LOAD2\
488  int y0 = s->temp[0][2 * i];\
489  int y1 = s->temp[0][2 * i + 1];
490 #define LOAD2_14\
491  int y0 = s->temp16[0][2 * i] & mask;\
492  int y1 = s->temp16[0][2 * i + 1] & mask;
493 #define LOAD2_16\
494  int y0 = s->temp16[0][2 * i];\
495  int y1 = s->temp16[0][2 * i + 1];
496 #define STAT2\
497  s->stats[plane][y0]++;\
498  s->stats[plane][y1]++;
499 #define STAT2_16\
500  s->stats[plane][y0>>2]++;\
501  s->stats[plane][y1>>2]++;
502 #define WRITE2\
503  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
504  put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
505 #define WRITE2_16\
506  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
507  put_bits(&s->pb, 2, y0&3);\
508  put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
509  put_bits(&s->pb, 2, y1&3);
510 
/* <=8 bit path: byte samples from s->temp[0]. */
511  if (s->bps <= 8) {
512  if (s->flags & AV_CODEC_FLAG_PASS1) {
513  for (i = 0; i < count; i++) {
514  LOAD2;
515  STAT2;
516  }
517  if (width&1) {
518  LOADEND;
519  STATEND;
520  }
521  }
522  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
523  return 0;
524 
525  if (s->context) {
526  for (i = 0; i < count; i++) {
527  LOAD2;
528  STAT2;
529  WRITE2;
530  }
531  if (width&1) {
532  LOADEND;
533  STATEND;
534  WRITEEND;
535  }
536  } else {
537  for (i = 0; i < count; i++) {
538  LOAD2;
539  WRITE2;
540  }
541  if (width&1) {
542  LOADEND;
543  WRITEEND;
544  }
545  }
/* 9..14 bit path: 16-bit samples masked to the valid range, VLC-coded
 * directly (same STAT/WRITE macros as the 8-bit path). */
546  } else if (s->bps <= 14) {
547  int mask = s->n - 1;
548  if (s->flags & AV_CODEC_FLAG_PASS1) {
549  for (i = 0; i < count; i++) {
550  LOAD2_14;
551  STAT2;
552  }
553  if (width&1) {
554  LOADEND_14;
555  STATEND;
556  }
557  }
558  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
559  return 0;
560 
561  if (s->context) {
562  for (i = 0; i < count; i++) {
563  LOAD2_14;
564  STAT2;
565  WRITE2;
566  }
567  if (width&1) {
568  LOADEND_14;
569  STATEND;
570  WRITEEND;
571  }
572  } else {
573  for (i = 0; i < count; i++) {
574  LOAD2_14;
575  WRITE2;
576  }
577  if (width&1) {
578  LOADEND_14;
579  WRITEEND;
580  }
581  }
/* >14 bit path: VLC-code the top bits (y>>2), store low 2 bits raw. */
582  } else {
583  if (s->flags & AV_CODEC_FLAG_PASS1) {
584  for (i = 0; i < count; i++) {
585  LOAD2_16;
586  STAT2_16;
587  }
588  if (width&1) {
589  LOADEND_16;
590  STATEND_16;
591  }
592  }
593  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
594  return 0;
595 
596  if (s->context) {
597  for (i = 0; i < count; i++) {
598  LOAD2_16;
599  STAT2_16;
600  WRITE2_16;
601  }
602  if (width&1) {
603  LOADEND_16;
604  STATEND_16;
605  WRITEEND_16;
606  }
607  } else {
608  for (i = 0; i < count; i++) {
609  LOAD2_16;
610  WRITE2_16;
611  }
612  if (width&1) {
613  LOADEND_16;
614  WRITEEND_16;
615  }
616  }
617  }
618 #undef LOAD2
619 #undef STAT2
620 #undef WRITE2
621  return 0;
622 }
623 
624 static int encode_gray_bitstream(HYuvContext *s, int count)
625 {
626  int i;
627 
628  if (put_bytes_left(&s->pb, 0) < 4 * count) {
629  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
630  return -1;
631  }
632 
633 #define LOAD2\
634  int y0 = s->temp[0][2 * i];\
635  int y1 = s->temp[0][2 * i + 1];
636 #define STAT2\
637  s->stats[0][y0]++;\
638  s->stats[0][y1]++;
639 #define WRITE2\
640  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
641  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
642 
643  count /= 2;
644 
645  if (s->flags & AV_CODEC_FLAG_PASS1) {
646  for (i = 0; i < count; i++) {
647  LOAD2;
648  STAT2;
649  }
650  }
651  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
652  return 0;
653 
654  if (s->context) {
655  for (i = 0; i < count; i++) {
656  LOAD2;
657  STAT2;
658  WRITE2;
659  }
660  } else {
661  for (i = 0; i < count; i++) {
662  LOAD2;
663  WRITE2;
664  }
665  }
666  return 0;
667 }
668 
669 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
670 {
671  int i;
672 
673  if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
674  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
675  return -1;
676  }
677 
678 #define LOAD_GBRA \
679  int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
680  int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
681  int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
682  int a = s->temp[0][planes * i + A];
683 
684 #define STAT_BGRA \
685  s->stats[0][b]++; \
686  s->stats[1][g]++; \
687  s->stats[2][r]++; \
688  if (planes == 4) \
689  s->stats[2][a]++;
690 
691 #define WRITE_GBRA \
692  put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
693  put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
694  put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
695  if (planes == 4) \
696  put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
697 
698  if ((s->flags & AV_CODEC_FLAG_PASS1) &&
699  (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
700  for (i = 0; i < count; i++) {
701  LOAD_GBRA;
702  STAT_BGRA;
703  }
704  } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
705  for (i = 0; i < count; i++) {
706  LOAD_GBRA;
707  STAT_BGRA;
708  WRITE_GBRA;
709  }
710  } else {
711  for (i = 0; i < count; i++) {
712  LOAD_GBRA;
713  WRITE_GBRA;
714  }
715  }
716  return 0;
717 }
718 
720  const AVFrame *pict, int *got_packet)
721 {
722  HYuvContext *s = avctx->priv_data;
723  const int width = s->width;
724  const int width2 = s->width>>1;
725  const int height = s->height;
726  const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
727  const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
728  const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
729  const AVFrame * const p = pict;
730  int i, j, size = 0, ret;
731 
732  if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0)
733  return ret;
734 
735  if (s->context) {
737  if (size < 0)
738  return size;
739 
740  for (i = 0; i < 4; i++)
741  for (j = 0; j < s->vlc_n; j++)
742  s->stats[i][j] >>= 1;
743  }
744 
745  init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
746 
747  if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
748  avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
749  int lefty, leftu, leftv, y, cy;
750 
751  put_bits(&s->pb, 8, leftv = p->data[2][0]);
752  put_bits(&s->pb, 8, lefty = p->data[0][1]);
753  put_bits(&s->pb, 8, leftu = p->data[1][0]);
754  put_bits(&s->pb, 8, p->data[0][0]);
755 
756  lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
757  leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
758  leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
759 
761 
762  if (s->predictor==MEDIAN) {
763  int lefttopy, lefttopu, lefttopv;
764  cy = y = 1;
765  if (s->interlaced) {
766  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
767  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
768  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
769 
771  y++; cy++;
772  }
773 
774  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
775  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
776  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
777 
778  encode_422_bitstream(s, 0, 4);
779 
780  lefttopy = p->data[0][3];
781  lefttopu = p->data[1][1];
782  lefttopv = p->data[2][1];
783  s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
784  s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
785  s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
786  encode_422_bitstream(s, 0, width - 4);
787  y++; cy++;
788 
789  for (; y < height; y++,cy++) {
790  uint8_t *ydst, *udst, *vdst;
791 
792  if (s->bitstream_bpp == 12) {
793  while (2 * cy > y) {
794  ydst = p->data[0] + p->linesize[0] * y;
795  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
797  y++;
798  }
799  if (y >= height) break;
800  }
801  ydst = p->data[0] + p->linesize[0] * y;
802  udst = p->data[1] + p->linesize[1] * cy;
803  vdst = p->data[2] + p->linesize[2] * cy;
804 
805  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
806  s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
807  s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
808 
810  }
811  } else {
812  for (cy = y = 1; y < height; y++, cy++) {
813  uint8_t *ydst, *udst, *vdst;
814 
815  /* encode a luma only line & y++ */
816  if (s->bitstream_bpp == 12) {
817  ydst = p->data[0] + p->linesize[0] * y;
818 
819  if (s->predictor == PLANE && s->interlaced < y) {
820  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
821 
822  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
823  } else {
824  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
825  }
827  y++;
828  if (y >= height) break;
829  }
830 
831  ydst = p->data[0] + p->linesize[0] * y;
832  udst = p->data[1] + p->linesize[1] * cy;
833  vdst = p->data[2] + p->linesize[2] * cy;
834 
835  if (s->predictor == PLANE && s->interlaced < cy) {
836  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
837  s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
838  s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
839 
840  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
841  leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
842  leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
843  } else {
844  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
845  leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
846  leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
847  }
848 
850  }
851  }
852  } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
853  uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
854  const int stride = -p->linesize[0];
855  const int fake_stride = -fake_ystride;
856  int y;
857  int leftr, leftg, leftb, lefta;
858 
859  put_bits(&s->pb, 8, lefta = data[A]);
860  put_bits(&s->pb, 8, leftr = data[R]);
861  put_bits(&s->pb, 8, leftg = data[G]);
862  put_bits(&s->pb, 8, leftb = data[B]);
863 
864  sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
865  &leftr, &leftg, &leftb, &lefta);
866  encode_bgra_bitstream(s, width - 1, 4);
867 
868  for (y = 1; y < s->height; y++) {
869  uint8_t *dst = data + y*stride;
870  if (s->predictor == PLANE && s->interlaced < y) {
871  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
872  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
873  &leftr, &leftg, &leftb, &lefta);
874  } else {
875  sub_left_prediction_bgr32(s, s->temp[0], dst, width,
876  &leftr, &leftg, &leftb, &lefta);
877  }
879  }
880  } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
881  uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
882  const int stride = -p->linesize[0];
883  const int fake_stride = -fake_ystride;
884  int y;
885  int leftr, leftg, leftb;
886 
887  put_bits(&s->pb, 8, leftr = data[0]);
888  put_bits(&s->pb, 8, leftg = data[1]);
889  put_bits(&s->pb, 8, leftb = data[2]);
890  put_bits(&s->pb, 8, 0);
891 
892  sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
893  &leftr, &leftg, &leftb);
895 
896  for (y = 1; y < s->height; y++) {
897  uint8_t *dst = data + y * stride;
898  if (s->predictor == PLANE && s->interlaced < y) {
899  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
900  width * 3);
901  sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
902  &leftr, &leftg, &leftb);
903  } else {
904  sub_left_prediction_rgb24(s, s->temp[0], dst, width,
905  &leftr, &leftg, &leftb);
906  }
908  }
909  } else if (s->version > 2) {
910  int plane;
911  for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
912  int left, y;
913  int w = width;
914  int h = height;
915  int fake_stride = fake_ystride;
916 
917  if (s->chroma && (plane == 1 || plane == 2)) {
918  w >>= s->chroma_h_shift;
919  h >>= s->chroma_v_shift;
920  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
921  }
922 
923  left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
924 
925  encode_plane_bitstream(s, w, plane);
926 
927  if (s->predictor==MEDIAN) {
928  int lefttop;
929  y = 1;
930  if (s->interlaced) {
931  left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
932 
933  encode_plane_bitstream(s, w, plane);
934  y++;
935  }
936 
937  lefttop = p->data[plane][0];
938 
939  for (; y < h; y++) {
940  uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
941 
942  sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
943 
944  encode_plane_bitstream(s, w, plane);
945  }
946  } else {
947  for (y = 1; y < h; y++) {
948  uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
949 
950  if (s->predictor == PLANE && s->interlaced < y) {
951  diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
952 
953  left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
954  } else {
955  left = sub_left_prediction(s, s->temp[0], dst, w , left);
956  }
957 
958  encode_plane_bitstream(s, w, plane);
959  }
960  }
961  }
962  } else {
963  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
964  }
965  emms_c();
966 
967  size += (put_bits_count(&s->pb) + 31) / 8;
968  put_bits(&s->pb, 16, 0);
969  put_bits(&s->pb, 15, 0);
970  size /= 4;
971 
972  if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
973  int j;
974  char *p = avctx->stats_out;
975  char *end = p + STATS_OUT_SIZE;
976  for (i = 0; i < 4; i++) {
977  for (j = 0; j < s->vlc_n; j++) {
978  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
979  p += strlen(p);
980  s->stats[i][j]= 0;
981  }
982  snprintf(p, end-p, "\n");
983  p++;
984  if (end <= p)
985  return AVERROR(ENOMEM);
986  }
987  } else if (avctx->stats_out)
988  avctx->stats_out[0] = '\0';
989  if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
990  flush_put_bits(&s->pb);
991  s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
992  }
993 
994  s->picture_number++;
995 
996  pkt->size = size * 4;
997  *got_packet = 1;
998 
999  return 0;
1000 }
1001 
1003 {
1004  HYuvContext *s = avctx->priv_data;
1005 
1007 
1008  av_freep(&avctx->stats_out);
1009 
1010  return 0;
1011 }
1012 
1013 #define OFFSET(x) offsetof(HYuvContext, x)
1014 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1015 
1016 #define COMMON_OPTIONS \
1017  { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1018  OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 }, \
1019  0, 1, VE }, \
1020  { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
1021  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, "pred" }, \
1022  { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, "pred" }, \
1023  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
1024 
1025 static const AVOption normal_options[] = {
1027  { NULL },
1028 };
1029 
1030 static const AVOption ff_options[] = {
1032  { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
1033  { NULL },
1034 };
1035 
/* AVClass for the "huffyuv" encoder, exposing normal_options through the
 * AVOption API. */
1036 static const AVClass normal_class = {
1037  .class_name = "huffyuv",
1038  .item_name = av_default_item_name,
1039  .option = normal_options,
1040  .version = LIBAVUTIL_VERSION_INT,
1041 };
1042 
/* AVClass for the "ffvhuff" encoder variant, exposing ff_options (which
 * additionally includes the per-frame "context" option). */
1043 static const AVClass ff_class = {
1044  .class_name = "ffvhuff",
1045  .item_name = av_default_item_name,
1046  .option = ff_options,
1047  .version = LIBAVUTIL_VERSION_INT,
1048 };
1049 
1051  .name = "huffyuv",
1052  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1053  .type = AVMEDIA_TYPE_VIDEO,
1054  .id = AV_CODEC_ID_HUFFYUV,
1055  .priv_data_size = sizeof(HYuvContext),
1056  .init = encode_init,
1057  .encode2 = encode_frame,
1058  .close = encode_end,
1059  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
1060  .priv_class = &normal_class,
1061  .pix_fmts = (const enum AVPixelFormat[]){
1064  },
1065  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
1067 };
1068 
1069 #if CONFIG_FFVHUFF_ENCODER
1070 const AVCodec ff_ffvhuff_encoder = {
1071  .name = "ffvhuff",
1072  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1073  .type = AVMEDIA_TYPE_VIDEO,
1074  .id = AV_CODEC_ID_FFVHUFF,
1075  .priv_data_size = sizeof(HYuvContext),
1076  .init = encode_init,
1077  .encode2 = encode_frame,
1078  .close = encode_end,
1079  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
1080  .priv_class = &ff_class,
1081  .pix_fmts = (const enum AVPixelFormat[]){
1097  },
1098  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
1100 };
1101 #endif
STATEND_16
#define STATEND_16
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:434
HYuvContext
Definition: huffyuv.h:55
AVCodec
AVCodec.
Definition: codec.h:202
stride
int stride
Definition: mace.c:144
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
sub_median_prediction
static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:147
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_huffyuvencdsp_init
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, AVCodecContext *avctx)
Definition: huffyuvencdsp.c:71
WRITE2_16
#define WRITE2_16
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:75
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1285
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2564
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:47
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
OFFSET
#define OFFSET(x)
Definition: huffyuvenc.c:1013
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:203
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:426
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
pixdesc.h
index
fg index
Definition: ffmpeg_filter.c:168
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:433
w
uint8_t w
Definition: llviddspenc.c:38
ff_class
static const AVClass ff_class
Definition: huffyuvenc.c:1043
R
#define R
Definition: huffyuvdsp.h:34
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
ff_ffvhuff_encoder
const AVCodec ff_ffvhuff_encoder
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:428
AVOption
AVOption.
Definition: opt.h:247
encode.h
b
#define b
Definition: input.c:40
encode_gray_bitstream
static int encode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvenc.c:624
data
const char data[16]
Definition: mxf.c:143
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:391
STATS_OUT_SIZE
#define STATS_OUT_SIZE
MEDIAN
@ MEDIAN
Definition: huffyuv.h:52
WRITEEND_16
#define WRITEEND_16
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:281
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:429
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
LOADEND_14
#define LOADEND_14
STAT2_16
#define STAT2_16
planes
static const struct @320 planes[]
A
#define A(x)
Definition: vp56_arith.h:28
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:425
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:409
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
v0
#define v0
Definition: regdef.h:26
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
sub_left_prediction
static int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:52
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:407
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:435
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:389
LOAD2
#define LOAD2
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2592
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:375
AV_CODEC_FLAG2_NO_OUTPUT
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:291
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:134
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:394
store_table
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:156
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:403
ff_huffyuv_encoder
const AVCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1050
mask
static const uint16_t mask[17]
Definition: lzw.c:38
sub_left_prediction_bgr32
static void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:83
STATEND
#define STATEND
WRITE2
#define WRITE2
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1244
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:404
g
const char * g
Definition: vf_curves.c:117
sub_left_prediction_rgb24
static void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:119
STAT2
#define STAT2
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
normal_class
static const AVClass normal_class
Definition: huffyuvenc.c:1036
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:388
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:402
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
AV_INPUT_BUFFER_MIN_SIZE
#define AV_INPUT_BUFFER_MIN_SIZE
Definition: avcodec.h:185
huffyuvencdsp.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:117
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
WRITE_GBRA
#define WRITE_GBRA
MAX_N
#define MAX_N
Definition: huffyuv.h:46
LOADEND_16
#define LOADEND_16
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:410
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
LOADEND
#define LOADEND
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
store_huffman_tables
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
Definition: huffyuvenc.c:181
WRITEEND
#define WRITEEND
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
src
#define src
Definition: vp8dsp.c:255
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:57
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:392
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:406
diff_bytes
static void diff_bytes(HYuvContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:42
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:1002
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:91
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
LOAD2_16
#define LOAD2_16
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1236
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
LOAD4
#define LOAD4
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:396
size
int size
Definition: twinvq_data.h:10344
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:398
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:235
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:364
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:430
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
COMMON_OPTIONS
#define COMMON_OPTIONS
Definition: huffyuvenc.c:1016
src0
#define src0
Definition: h264pred.c:139
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
VE
#define VE
Definition: huffyuvenc.c:1014
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
lossless_videoencdsp.h
src1
#define src1
Definition: h264pred.c:140
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1418
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:79
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
encode_422_bitstream
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
Definition: huffyuvenc.c:407
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
normal_options
static const AVOption normal_options[]
Definition: huffyuvenc.c:1025
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:408
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:51
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:390
avcodec.h
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1280
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:427
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:395
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:400
B
#define B
Definition: huffyuvdsp.h:32
AVCodecContext
main external API structure.
Definition: avcodec.h:383
LOAD_GBRA
#define LOAD_GBRA
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
huffman.h
temp
else temp
Definition: vf_mcdeint.c:248
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_options
static const AVOption ff_options[]
Definition: huffyuvenc.c:1030
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: huffyuvenc.c:719
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
LOAD2_14
#define LOAD2_14
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
ff_huffyuv_common_init
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
d
d
Definition: ffmpeg_filter.c:156
encode_bgra_bitstream
static int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
Definition: huffyuvenc.c:669
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:401
huffyuv.h
put_bits.h
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:34
encode_plane_bitstream
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvenc.c:462
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:399
STAT_BGRA
#define STAT_BGRA
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:231