FFmpeg
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see https://multimedia.cx/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "config_components.h"
32 
33 #include "avcodec.h"
34 #include "bswapdsp.h"
35 #include "codec_internal.h"
36 #include "encode.h"
37 #include "huffyuv.h"
38 #include "huffman.h"
39 #include "huffyuvencdsp.h"
40 #include "lossless_videoencdsp.h"
41 #include "put_bits.h"
42 #include "libavutil/emms.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/pixdesc.h"
45 
/* Private state of the huffyuv/ffvhuff encoders.
 * NOTE(review): the extraction this file was recovered from dropped several
 * member lines (original lines 48-53, 61-62, 65, 72-74, 76) and the closing
 * "} HYuvEncContext;" — restore them from upstream huffyuvenc.c. */
 46 typedef struct HYuvEncContext {
 47  AVClass *class;
 54  int version; // bitstream version: 2 by default, 3 for the extended formats (set in encode_init)
 55  int bps; // bits per sample, taken from the pixel format descriptor
 56  int n; // 1<<bps
 57  int vlc_n; // number of vlc codes (FFMIN(1<<bps, MAX_VLC_N))
 58  int alpha; // nonzero if the pixel format carries an alpha plane
 59  int chroma; // nonzero if the pixel format has more than 2 components
 60  int yuv; // nonzero for non-RGB formats with >= 2 components
 63  int flags; // copy of avctx->flags
 64  int context; // per-frame adaptive Huffman tables ("context" option)
 66 
 67  uint8_t *temp[3]; // scratch line buffers holding prediction residuals
 68  uint16_t *temp16[3]; ///< identical to temp but 16bit type
 69  uint64_t stats[4][MAX_VLC_N]; // symbol occurrence counts used to build the tables
 70  uint8_t len[4][MAX_VLC_N]; // Huffman code lengths, one table per plane class
 71  uint32_t bits[4][MAX_VLC_N]; // Huffman code bits, one table per plane class
 75  int non_determ; // non-deterministic, multi-threaded encoder allowed
 77 
78 static inline void diff_bytes(HYuvEncContext *s, uint8_t *dst,
79  const uint8_t *src0, const uint8_t *src1, int w)
80 {
81  if (s->bps <= 8) {
82  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
83  } else {
84  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
85  }
86 }
87 
88 static inline int sub_left_prediction(HYuvEncContext *s, uint8_t *dst,
89  const uint8_t *src, int w, int left)
90 {
91  int i;
92  int min_width = FFMIN(w, 32);
93 
94  if (s->bps <= 8) {
95  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
96  const int temp = src[i];
97  dst[i] = temp - left;
98  left = temp;
99  }
100  if (w < 32)
101  return left;
102  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
103  return src[w-1];
104  } else {
105  const uint16_t *src16 = (const uint16_t *)src;
106  uint16_t *dst16 = ( uint16_t *)dst;
107  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
108  const int temp = src16[i];
109  dst16[i] = temp - left;
110  left = temp;
111  }
112  if (w < 32)
113  return left;
114  s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
115  return src16[w-1];
116  }
117 }
118 
119 static inline void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst,
120  const uint8_t *src, int w,
121  int *red, int *green, int *blue,
122  int *alpha)
123 {
124  int i;
125  int r, g, b, a;
126  int min_width = FFMIN(w, 8);
127  r = *red;
128  g = *green;
129  b = *blue;
130  a = *alpha;
131 
132  for (i = 0; i < min_width; i++) {
133  const int rt = src[i * 4 + R];
134  const int gt = src[i * 4 + G];
135  const int bt = src[i * 4 + B];
136  const int at = src[i * 4 + A];
137  dst[i * 4 + R] = rt - r;
138  dst[i * 4 + G] = gt - g;
139  dst[i * 4 + B] = bt - b;
140  dst[i * 4 + A] = at - a;
141  r = rt;
142  g = gt;
143  b = bt;
144  a = at;
145  }
146 
147  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
148 
149  *red = src[(w - 1) * 4 + R];
150  *green = src[(w - 1) * 4 + G];
151  *blue = src[(w - 1) * 4 + B];
152  *alpha = src[(w - 1) * 4 + A];
153 }
154 
155 static inline void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst,
156  const uint8_t *src, int w,
157  int *red, int *green, int *blue)
158 {
159  int i;
160  int r, g, b;
161  r = *red;
162  g = *green;
163  b = *blue;
164  for (i = 0; i < FFMIN(w, 16); i++) {
165  const int rt = src[i * 3 + 0];
166  const int gt = src[i * 3 + 1];
167  const int bt = src[i * 3 + 2];
168  dst[i * 3 + 0] = rt - r;
169  dst[i * 3 + 1] = gt - g;
170  dst[i * 3 + 2] = bt - b;
171  r = rt;
172  g = gt;
173  b = bt;
174  }
175 
176  s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
177 
178  *red = src[(w - 1) * 3 + 0];
179  *green = src[(w - 1) * 3 + 1];
180  *blue = src[(w - 1) * 3 + 2];
181 }
182 
183 static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst,
184  const uint8_t *src1, const uint8_t *src2,
185  int w, int *left, int *left_top)
186 {
187  if (s->bps <= 8) {
188  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
189  } else {
190  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
191  }
192 }
193 
194 static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
195 {
196  int i;
197  int index = 0;
198  int n = s->vlc_n;
199 
200  for (i = 0; i < n;) {
201  int val = len[i];
202  int repeat = 0;
203 
204  for (; i < n && len[i] == val && repeat < 255; i++)
205  repeat++;
206 
207  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
208  if (repeat > 7) {
209  buf[index++] = val;
210  buf[index++] = repeat;
211  } else {
212  buf[index++] = val | (repeat << 5);
213  }
214  }
215 
216  return index;
217 }
218 
219 static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
220 {
221  int i, ret;
222  int size = 0;
223  int count = 3;
224 
225  if (s->version > 2)
226  count = 1 + s->alpha + 2*s->chroma;
227 
228  for (i = 0; i < count; i++) {
229  if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
230  return ret;
231 
232  if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
233  return -1;
234  }
235 
236  size += store_table(s, s->len[i], buf + size);
237  }
238  return size;
239 }
240 
/* encode_init: one-time encoder setup.  Derives format properties from the
 * pixel format descriptor, validates option combinations, writes the 4-byte
 * header plus the serialized Huffman tables into extradata, and seeds the
 * symbol statistics.
 * NOTE(review): recovered from a doxygen dump — original lines 240-241 (the
 * function signature) and 299-322 (further >8-bit pix_fmt case labels) were
 * dropped; restore them from upstream huffyuvenc.c. */
242 {
243  HYuvEncContext *s = avctx->priv_data;
244  int i, j;
245  int ret;
246  const AVPixFmtDescriptor *desc;
247 
248  s->avctx = avctx;
249  s->flags = avctx->flags;
250 
251  ff_bswapdsp_init(&s->bdsp);
252  ff_huffyuvencdsp_init(&s->hencdsp, avctx->pix_fmt);
253  ff_llvidencdsp_init(&s->llvidencdsp);
254 
255  avctx->extradata = av_mallocz(3*MAX_N + 4);
256  if (!avctx->extradata)
257  return AVERROR(ENOMEM);
258  if (s->flags&AV_CODEC_FLAG_PASS1) {
259 #define STATS_OUT_SIZE 21*MAX_N*3 + 4
260  avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
261  if (!avctx->stats_out)
262  return AVERROR(ENOMEM);
263  }
264  s->version = 2;
265 
/* Format properties straight from the pixel format descriptor. */
266  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
267  s->bps = desc->comp[0].depth;
268  s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
269  s->chroma = desc->nb_components > 2;
270  s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
271  s->chroma_h_shift = desc->log2_chroma_w;
272  s->chroma_v_shift = desc->log2_chroma_h;
273 
/* Map pix_fmt to a bitstream bpp (classic formats) or bump the stream
 * version to 3 for the extended planar formats. */
274  switch (avctx->pix_fmt) {
275  case AV_PIX_FMT_YUV420P:
276  case AV_PIX_FMT_YUV422P:
277  if (avctx->width & 1) {
278  av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
279  return AVERROR(EINVAL);
280  }
281  s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
282  break;
283  case AV_PIX_FMT_YUV444P:
284  case AV_PIX_FMT_YUV410P:
285  case AV_PIX_FMT_YUV411P:
286  case AV_PIX_FMT_YUV440P:
287  case AV_PIX_FMT_GBRP:
288  case AV_PIX_FMT_GBRP9:
289  case AV_PIX_FMT_GBRP10:
290  case AV_PIX_FMT_GBRP12:
291  case AV_PIX_FMT_GBRP14:
292  case AV_PIX_FMT_GBRP16:
293  case AV_PIX_FMT_GRAY8:
294  case AV_PIX_FMT_GRAY16:
295  case AV_PIX_FMT_YUVA444P:
296  case AV_PIX_FMT_YUVA420P:
297  case AV_PIX_FMT_YUVA422P:
298  case AV_PIX_FMT_GBRAP:
299  case AV_PIX_FMT_YUV420P9:
304  case AV_PIX_FMT_YUV422P9:
309  case AV_PIX_FMT_YUV444P9:
323  s->version = 3;
324  break;
325  case AV_PIX_FMT_RGB32:
326  s->bitstream_bpp = 32;
327  break;
328  case AV_PIX_FMT_RGB24:
329  s->bitstream_bpp = 24;
330  break;
331  default:
332  av_log(avctx, AV_LOG_ERROR, "format not supported\n");
333  return AVERROR(EINVAL);
334  }
335  s->n = 1<<s->bps;
336  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
337 
338  avctx->bits_per_coded_sample = s->bitstream_bpp;
339  s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
340  s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
/* Per-frame tables rewrite the stats each frame, which clashes with
 * reproducible two-pass stats files — reject the combination. */
341  if (s->context) {
342  if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
343  av_log(avctx, AV_LOG_ERROR,
344  "context=1 is not compatible with "
345  "2 pass huffyuv encoding\n");
346  return AVERROR(EINVAL);
347  }
348  }
349 
350  if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
351  if (s->interlaced != ( avctx->height > 288 ))
352  av_log(avctx, AV_LOG_INFO,
353  "using huffyuv 2.2.0 or newer interlacing flag\n");
354  }
355 
356  if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
357  av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
358  "Use vstrict=-2 / -strict -2 to use it anyway.\n");
359  return AVERROR(EINVAL);
360  }
361 
362  if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
363  av_log(avctx, AV_LOG_ERROR,
364  "Error: RGB is incompatible with median predictor\n");
365  return AVERROR(EINVAL);
366  }
367 
/* 4-byte extradata header: predictor/decorrelate, bpp (or depth+shifts for
 * version 3), interlacing/context flags, and a version marker. */
368  avctx->extradata[0] = s->predictor | (s->decorrelate << 6);
369  avctx->extradata[2] = s->interlaced ? 0x10 : 0x20;
370  if (s->context)
371  avctx->extradata[2] |= 0x40;
372  if (s->version < 3) {
373  avctx->extradata[1] = s->bitstream_bpp;
374  avctx->extradata[3] = 0;
375  } else {
376  avctx->extradata[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
377  if (s->chroma)
378  avctx->extradata[2] |= s->yuv ? 1 : 2;
379  if (s->alpha)
380  avctx->extradata[2] |= 4;
381  avctx->extradata[3] = 1;
382  }
383  avctx->extradata_size = 4;
384 
/* Two-pass: fold the pass-1 stats file into the counters; otherwise seed a
 * distribution peaked around zero residual. */
385  if (avctx->stats_in) {
386  char *p = avctx->stats_in;
387 
388  for (i = 0; i < 4; i++)
389  for (j = 0; j < s->vlc_n; j++)
390  s->stats[i][j] = 1;
391 
392  for (;;) {
393  for (i = 0; i < 4; i++) {
394  char *next;
395 
396  for (j = 0; j < s->vlc_n; j++) {
397  s->stats[i][j] += strtol(p, &next, 0);
398  if (next == p) return -1;
399  p = next;
400  }
401  }
402  if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
403  }
404  } else {
405  for (i = 0; i < 4; i++)
406  for (j = 0; j < s->vlc_n; j++) {
407  int d = FFMIN(j, s->vlc_n - j);
408 
409  s->stats[i][j] = 100000000 / (d*d + 1);
410  }
411  }
412 
413  ret = store_huffman_tables(s, avctx->extradata + avctx->extradata_size);
414  if (ret < 0)
415  return ret;
416  avctx->extradata_size += ret;
417 
/* With context=1 the stats are re-seeded per frame size; otherwise they are
 * cleared so only real frame data accumulates. */
418  if (s->context) {
419  for (i = 0; i < 4; i++) {
420  int pels = avctx->width * avctx->height / (i ? 40 : 10);
421  for (j = 0; j < s->vlc_n; j++) {
422  int d = FFMIN(j, s->vlc_n - j);
423  s->stats[i][j] = pels/(d*d + 1);
424  }
425  }
426  } else {
427  for (i = 0; i < 4; i++)
428  for (j = 0; j < s->vlc_n; j++)
429  s->stats[i][j]= 0;
430  }
431 
432  ret = ff_huffyuv_alloc_temp(s->temp, s->temp16, avctx->width);
433  if (ret < 0)
434  return ret;
435 
436  s->picture_number=0;
437 
438  return 0;
439 }
440 static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
441 {
442  int i;
443  const uint8_t *y = s->temp[0] + offset;
444  const uint8_t *u = s->temp[1] + offset / 2;
445  const uint8_t *v = s->temp[2] + offset / 2;
446 
447  if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
448  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
449  return -1;
450  }
451 
452 #define LOAD4\
453  int y0 = y[2 * i];\
454  int y1 = y[2 * i + 1];\
455  int u0 = u[i];\
456  int v0 = v[i];
457 
458  count /= 2;
459 
460  if (s->flags & AV_CODEC_FLAG_PASS1) {
461  for(i = 0; i < count; i++) {
462  LOAD4;
463  s->stats[0][y0]++;
464  s->stats[1][u0]++;
465  s->stats[0][y1]++;
466  s->stats[2][v0]++;
467  }
468  }
469  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
470  return 0;
471  if (s->context) {
472  for (i = 0; i < count; i++) {
473  LOAD4;
474  s->stats[0][y0]++;
475  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
476  s->stats[1][u0]++;
477  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
478  s->stats[0][y1]++;
479  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
480  s->stats[2][v0]++;
481  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
482  }
483  } else {
484  for(i = 0; i < count; i++) {
485  LOAD4;
486  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
487  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
488  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
489  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
490  }
491  }
492  return 0;
493 }
494 
/* encode_plane_bitstream: entropy-codes one line (width samples) of a single
 * plane out of s->temp/s->temp16, two samples per iteration plus an optional
 * trailing odd sample.  Samples deeper than 14 bits are coded as a VLC on
 * the top bits (y>>2) followed by 2 raw LSBs. */
495 static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
496 {
497  int i, count = width/2;
498 
499  if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
500  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
501  return -1;
502  }
503 
504 #define LOADEND\
505  int y0 = s->temp[0][width-1];
506 #define LOADEND_14\
507  int y0 = s->temp16[0][width-1] & mask;
508 #define LOADEND_16\
509  int y0 = s->temp16[0][width-1];
510 #define STATEND\
511  s->stats[plane][y0]++;
512 #define STATEND_16\
513  s->stats[plane][y0>>2]++;
514 #define WRITEEND\
515  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
516 #define WRITEEND_16\
517  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
518  put_bits(&s->pb, 2, y0&3);
519 
520 #define LOAD2\
521  int y0 = s->temp[0][2 * i];\
522  int y1 = s->temp[0][2 * i + 1];
523 #define LOAD2_14\
524  int y0 = s->temp16[0][2 * i] & mask;\
525  int y1 = s->temp16[0][2 * i + 1] & mask;
526 #define LOAD2_16\
527  int y0 = s->temp16[0][2 * i];\
528  int y1 = s->temp16[0][2 * i + 1];
529 #define STAT2\
530  s->stats[plane][y0]++;\
531  s->stats[plane][y1]++;
532 #define STAT2_16\
533  s->stats[plane][y0>>2]++;\
534  s->stats[plane][y1>>2]++;
535 #define WRITE2\
536  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
537  put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
538 #define WRITE2_16\
539  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
540  put_bits(&s->pb, 2, y0&3);\
541  put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
542  put_bits(&s->pb, 2, y1&3);
543 
/* <= 8 bit samples: byte buffer, direct VLC lookup. */
544  if (s->bps <= 8) {
545  if (s->flags & AV_CODEC_FLAG_PASS1) {
546  for (i = 0; i < count; i++) {
547  LOAD2;
548  STAT2;
549  }
550  if (width&1) {
551  LOADEND;
552  STATEND;
553  }
554  }
555  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
556  return 0;
557 
558  if (s->context) {
559  for (i = 0; i < count; i++) {
560  LOAD2;
561  STAT2;
562  WRITE2;
563  }
564  if (width&1) {
565  LOADEND;
566  STATEND;
567  WRITEEND;
568  }
569  } else {
570  for (i = 0; i < count; i++) {
571  LOAD2;
572  WRITE2;
573  }
574  if (width&1) {
575  LOADEND;
576  WRITEEND;
577  }
578  }
/* 9..14 bit samples: 16-bit buffer, values masked to the sample depth. */
579  } else if (s->bps <= 14) {
580  int mask = s->n - 1;
581  if (s->flags & AV_CODEC_FLAG_PASS1) {
582  for (i = 0; i < count; i++) {
583  LOAD2_14;
584  STAT2;
585  }
586  if (width&1) {
587  LOADEND_14;
588  STATEND;
589  }
590  }
591  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
592  return 0;
593 
594  if (s->context) {
595  for (i = 0; i < count; i++) {
596  LOAD2_14;
597  STAT2;
598  WRITE2;
599  }
600  if (width&1) {
601  LOADEND_14;
602  STATEND;
603  WRITEEND;
604  }
605  } else {
606  for (i = 0; i < count; i++) {
607  LOAD2_14;
608  WRITE2;
609  }
610  if (width&1) {
611  LOADEND_14;
612  WRITEEND;
613  }
614  }
/* > 14 bit samples: VLC on the high bits plus 2 raw least significant bits. */
615  } else {
616  if (s->flags & AV_CODEC_FLAG_PASS1) {
617  for (i = 0; i < count; i++) {
618  LOAD2_16;
619  STAT2_16;
620  }
621  if (width&1) {
622  LOADEND_16;
623  STATEND_16;
624  }
625  }
626  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
627  return 0;
628 
629  if (s->context) {
630  for (i = 0; i < count; i++) {
631  LOAD2_16;
632  STAT2_16;
633  WRITE2_16;
634  }
635  if (width&1) {
636  LOADEND_16;
637  STATEND_16;
638  WRITEEND_16;
639  }
640  } else {
641  for (i = 0; i < count; i++) {
642  LOAD2_16;
643  WRITE2_16;
644  }
645  if (width&1) {
646  LOADEND_16;
647  WRITEEND_16;
648  }
649  }
650  }
651 #undef LOAD2
652 #undef STAT2
653 #undef WRITE2
654  return 0;
655 }
656 
657 static int encode_gray_bitstream(HYuvEncContext *s, int count)
658 {
659  int i;
660 
661  if (put_bytes_left(&s->pb, 0) < 4 * count) {
662  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
663  return -1;
664  }
665 
666 #define LOAD2\
667  int y0 = s->temp[0][2 * i];\
668  int y1 = s->temp[0][2 * i + 1];
669 #define STAT2\
670  s->stats[0][y0]++;\
671  s->stats[0][y1]++;
672 #define WRITE2\
673  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
674  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
675 
676  count /= 2;
677 
678  if (s->flags & AV_CODEC_FLAG_PASS1) {
679  for (i = 0; i < count; i++) {
680  LOAD2;
681  STAT2;
682  }
683  }
684  if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
685  return 0;
686 
687  if (s->context) {
688  for (i = 0; i < count; i++) {
689  LOAD2;
690  STAT2;
691  WRITE2;
692  }
693  } else {
694  for (i = 0; i < count; i++) {
695  LOAD2;
696  WRITE2;
697  }
698  }
699  return 0;
700 }
701 
702 static inline int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
703 {
704  int i;
705 
706  if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
707  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
708  return -1;
709  }
710 
711 #define LOAD_GBRA \
712  int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
713  int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
714  int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
715  int a = s->temp[0][planes * i + A];
716 
717 #define STAT_BGRA \
718  s->stats[0][b]++; \
719  s->stats[1][g]++; \
720  s->stats[2][r]++; \
721  if (planes == 4) \
722  s->stats[2][a]++;
723 
724 #define WRITE_GBRA \
725  put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
726  put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
727  put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
728  if (planes == 4) \
729  put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
730 
731  if ((s->flags & AV_CODEC_FLAG_PASS1) &&
732  (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
733  for (i = 0; i < count; i++) {
734  LOAD_GBRA;
735  STAT_BGRA;
736  }
737  } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
738  for (i = 0; i < count; i++) {
739  LOAD_GBRA;
740  STAT_BGRA;
741  WRITE_GBRA;
742  }
743  } else {
744  for (i = 0; i < count; i++) {
745  LOAD_GBRA;
746  WRITE_GBRA;
747  }
748  }
749  return 0;
750 }
751 
/* encode_frame: encodes one frame.  Per-line residuals are produced with the
 * configured predictor (left / plane / median) into s->temp and then
 * entropy-coded; the finished bitstream is padded to 32-bit words and
 * byte-swapped.
 * NOTE(review): recovered from a doxygen dump — original lines 752 (start of
 * the function signature), 769, 793, 803, 829, 842, 859, 882, 910, 925 and
 * 938 (mostly the store_huffman_tables / encode_*_bitstream calls whose
 * results the surrounding code uses) were dropped; restore from upstream
 * huffyuvenc.c before compiling. */
753  const AVFrame *pict, int *got_packet)
754 {
755  HYuvEncContext *s = avctx->priv_data;
756  const int width = avctx->width;
757  const int width2 = avctx->width >> 1;
758  const int height = avctx->height;
759  const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
760  const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
761  const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
762  const AVFrame * const p = pict;
763  int i, j, size = 0, ret;
764 
765  if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
766  return ret;
767 
/* context=1: fresh per-frame tables are stored in the packet and the stats
 * are halved so older frames decay. */
768  if (s->context) {
770  if (size < 0)
771  return size;
772 
773  for (i = 0; i < 4; i++)
774  for (j = 0; j < s->vlc_n; j++)
775  s->stats[i][j] >>= 1;
776  }
777 
778  init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
779 
/* Classic packed 4:2:2 / 4:2:0 huffyuv path. */
780  if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
781  avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
782  int lefty, leftu, leftv, y, cy;
783 
784  put_bits(&s->pb, 8, leftv = p->data[2][0]);
785  put_bits(&s->pb, 8, lefty = p->data[0][1]);
786  put_bits(&s->pb, 8, leftu = p->data[1][0]);
787  put_bits(&s->pb, 8, p->data[0][0]);
788 
789  lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
790  leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
791  leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
792 
794 
795  if (s->predictor==MEDIAN) {
796  int lefttopy, lefttopu, lefttopv;
797  cy = y = 1;
798  if (s->interlaced) {
799  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
800  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
801  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
802 
804  y++; cy++;
805  }
806 
/* The first few samples of the second line still use left prediction
 * before median prediction takes over. */
807  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
808  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
809  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
810 
811  encode_422_bitstream(s, 0, 4);
812 
813  lefttopy = p->data[0][3];
814  lefttopu = p->data[1][1];
815  lefttopv = p->data[2][1];
816  s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
817  s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
818  s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
819  encode_422_bitstream(s, 0, width - 4);
820  y++; cy++;
821 
822  for (; y < height; y++,cy++) {
823  const uint8_t *ydst, *udst, *vdst;
824 
/* 4:2:0: luma has twice the chroma line count, so emit luma-only
 * lines until y catches up with 2*cy. */
825  if (s->bitstream_bpp == 12) {
826  while (2 * cy > y) {
827  ydst = p->data[0] + p->linesize[0] * y;
828  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
830  y++;
831  }
832  if (y >= height) break;
833  }
834  ydst = p->data[0] + p->linesize[0] * y;
835  udst = p->data[1] + p->linesize[1] * cy;
836  vdst = p->data[2] + p->linesize[2] * cy;
837 
838  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
839  s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
840  s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
841 
843  }
844  } else {
845  for (cy = y = 1; y < height; y++, cy++) {
846  const uint8_t *ydst, *udst, *vdst;
847 
848  /* encode a luma only line & y++ */
849  if (s->bitstream_bpp == 12) {
850  ydst = p->data[0] + p->linesize[0] * y;
851 
852  if (s->predictor == PLANE && s->interlaced < y) {
853  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
854 
855  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
856  } else {
857  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
858  }
860  y++;
861  if (y >= height) break;
862  }
863 
864  ydst = p->data[0] + p->linesize[0] * y;
865  udst = p->data[1] + p->linesize[1] * cy;
866  vdst = p->data[2] + p->linesize[2] * cy;
867 
868  if (s->predictor == PLANE && s->interlaced < cy) {
869  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
870  s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
871  s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
872 
873  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
874  leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
875  leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
876  } else {
877  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
878  leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
879  leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
880  }
881 
883  }
884  }
/* Packed RGB32 path: frame is walked bottom-up (negative stride). */
885  } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
886  const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
887  const int stride = -p->linesize[0];
888  const int fake_stride = -fake_ystride;
889  int leftr, leftg, leftb, lefta;
890 
891  put_bits(&s->pb, 8, lefta = data[A]);
892  put_bits(&s->pb, 8, leftr = data[R]);
893  put_bits(&s->pb, 8, leftg = data[G]);
894  put_bits(&s->pb, 8, leftb = data[B]);
895 
896  sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
897  &leftr, &leftg, &leftb, &lefta);
898  encode_bgra_bitstream(s, width - 1, 4);
899 
900  for (int y = 1; y < height; y++) {
901  const uint8_t *dst = data + y*stride;
902  if (s->predictor == PLANE && s->interlaced < y) {
903  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
904  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
905  &leftr, &leftg, &leftb, &lefta);
906  } else {
907  sub_left_prediction_bgr32(s, s->temp[0], dst, width,
908  &leftr, &leftg, &leftb, &lefta);
909  }
911  }
/* Packed RGB24 path, same bottom-up walk. */
912  } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
913  const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
914  const int stride = -p->linesize[0];
915  const int fake_stride = -fake_ystride;
916  int leftr, leftg, leftb;
917 
918  put_bits(&s->pb, 8, leftr = data[0]);
919  put_bits(&s->pb, 8, leftg = data[1]);
920  put_bits(&s->pb, 8, leftb = data[2]);
921  put_bits(&s->pb, 8, 0);
922 
923  sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
924  &leftr, &leftg, &leftb);
926 
927  for (int y = 1; y < height; y++) {
928  const uint8_t *dst = data + y * stride;
929  if (s->predictor == PLANE && s->interlaced < y) {
930  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
931  width * 3);
932  sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
933  &leftr, &leftg, &leftb);
934  } else {
935  sub_left_prediction_rgb24(s, s->temp[0], dst, width,
936  &leftr, &leftg, &leftb);
937  }
939  }
/* Version-3 planar path: every plane is predicted and coded independently. */
940  } else if (s->version > 2) {
941  int plane;
942  for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
943  int left, y;
944  int w = width;
945  int h = height;
946  int fake_stride = fake_ystride;
947 
948  if (s->chroma && (plane == 1 || plane == 2)) {
949  w >>= s->chroma_h_shift;
950  h >>= s->chroma_v_shift;
951  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
952  }
953 
954  left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
955 
956  encode_plane_bitstream(s, w, plane);
957 
958  if (s->predictor==MEDIAN) {
959  int lefttop;
960  y = 1;
961  if (s->interlaced) {
962  left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
963 
964  encode_plane_bitstream(s, w, plane);
965  y++;
966  }
967 
968  lefttop = p->data[plane][0];
969 
970  for (; y < h; y++) {
971  const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
972 
973  sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
974 
975  encode_plane_bitstream(s, w, plane);
976  }
977  } else {
978  for (y = 1; y < h; y++) {
979  const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
980 
981  if (s->predictor == PLANE && s->interlaced < y) {
982  diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
983 
984  left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
985  } else {
986  left = sub_left_prediction(s, s->temp[0], dst, w , left);
987  }
988 
989  encode_plane_bitstream(s, w, plane);
990  }
991  }
992  }
993  } else {
994  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
995  }
996  emms_c();
997 
/* Round up to whole 32-bit words; the packet is byte-swapped per word. */
998  size += (put_bits_count(&s->pb) + 31) / 8;
999  put_bits(&s->pb, 16, 0);
1000  put_bits(&s->pb, 15, 0);
1001  size /= 4;
1002 
/* Pass 1: dump the accumulated statistics every 32nd frame. */
1003  if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1004  int j;
1005  char *p = avctx->stats_out;
1006  char *end = p + STATS_OUT_SIZE;
1007  for (i = 0; i < 4; i++) {
1008  for (j = 0; j < s->vlc_n; j++) {
1009  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1010  p += strlen(p);
1011  s->stats[i][j]= 0;
1012  }
1013  snprintf(p, end-p, "\n");
1014  p++;
1015  if (end <= p)
1016  return AVERROR(ENOMEM);
1017  }
1018  } else if (avctx->stats_out)
1019  avctx->stats_out[0] = '\0';
1020  if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
1021  flush_put_bits(&s->pb);
1022  s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
1023  }
1024 
1025  s->picture_number++;
1026 
1027  pkt->size = size * 4;
1028  *got_packet = 1;
1029 
1030  return 0;
1031 }
1032 
/* encode_end: releases the temp line buffers and the pass-1 stats string.
 * NOTE(review): the function signature line (original 1033) was lost in
 * extraction — restore "static av_cold int encode_end(AVCodecContext *avctx)"
 * or equivalent from upstream. */
1034 {
1035  HYuvEncContext *s = avctx->priv_data;
1036 
1037  ff_huffyuv_common_end(s->temp, s->temp16);
1038 
1039  av_freep(&avctx->stats_out);
1040 
1041  return 0;
1042 }
1043 
/* Helpers for the AVOption tables below. */
1044 #define OFFSET(x) offsetof(HYuvEncContext, x)
1045 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1046 
/* Options shared by the huffyuv and ffvhuff encoders: determinism opt-out
 * and the predictor choice (left / plane / median). */
1047 #define COMMON_OPTIONS \
1048  { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1049  OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 }, \
1050  0, 1, VE }, \
1051  { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, .unit = "pred" }, \
1052  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, .unit = "pred" }, \
1053  { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, .unit = "pred" }, \
1054  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, .unit = "pred" }, \
1055 
/* Option table for the stock huffyuv encoder.
 * NOTE(review): the COMMON_OPTIONS line (original 1057) was lost in
 * extraction — it belongs between the opening brace and the terminator. */
1056 static const AVOption normal_options[] = {
1058  { NULL },
1059 };
1060 
/* Option table for the ffvhuff variant: shared options plus per-frame
 * adaptive tables ("context").
 * NOTE(review): the COMMON_OPTIONS line (original 1062) was lost in
 * extraction — restore it before the "context" entry. */
1061 static const AVOption ff_options[] = {
1063  { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
1064  { NULL },
1065 };
1066 
1067 static const AVClass normal_class = {
1068  .class_name = "huffyuv",
1069  .item_name = av_default_item_name,
1070  .option = normal_options,
1071  .version = LIBAVUTIL_VERSION_INT,
1072 };
1073 
1074 static const AVClass ff_class = {
1075  .class_name = "ffvhuff",
1076  .item_name = av_default_item_name,
1077  .option = ff_options,
1078  .version = LIBAVUTIL_VERSION_INT,
1079 };
1080 
/* Codec registration for the stock huffyuv encoder.
 * NOTE(review): extraction dropped the opening "const FFCodec
 * ff_huffyuv_encoder = {" (original 1081), the capability continuation and
 * FF_CODEC_ENCODE_CB lines (1087, 1090), the pix_fmts entries (1094-1095)
 * and the closing "};" — restore from upstream huffyuvenc.c. */
1082  .p.name = "huffyuv",
1083  CODEC_LONG_NAME("Huffyuv / HuffYUV"),
1084  .p.type = AVMEDIA_TYPE_VIDEO,
1085  .p.id = AV_CODEC_ID_HUFFYUV,
1086  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
1088  .priv_data_size = sizeof(HYuvEncContext),
1089  .init = encode_init,
1091  .close = encode_end,
1092  .p.priv_class = &normal_class,
1093  .p.pix_fmts = (const enum AVPixelFormat[]){
1096  },
1097  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1098 
1099 
/* Codec registration for the ffvhuff variant (conditionally compiled).
 * NOTE(review): extraction dropped the capability continuation and
 * FF_CODEC_ENCODE_CB lines (originals 1107, 1110) and the pix_fmts entries
 * (1114-1128) — restore from upstream huffyuvenc.c. */
1100 #if CONFIG_FFVHUFF_ENCODER
1101 const FFCodec ff_ffvhuff_encoder = {
1102  .p.name = "ffvhuff",
1103  CODEC_LONG_NAME("Huffyuv FFmpeg variant"),
1104  .p.type = AVMEDIA_TYPE_VIDEO,
1105  .p.id = AV_CODEC_ID_FFVHUFF,
1106  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
1108  .priv_data_size = sizeof(HYuvEncContext),
1109  .init = encode_init,
1111  .close = encode_end,
1112  .p.priv_class = &ff_class,
1113  .p.pix_fmts = (const enum AVPixelFormat[]){
1129  },
1130  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1131 };
1132 #endif
HYuvEncContext::chroma_h_shift
int chroma_h_shift
Definition: huffyuvenc.c:61
STATEND_16
#define STATEND_16
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:522
A
#define A(x)
Definition: vpx_arith.h:28
bswapdsp.h
HYuvEncContext::flags
int flags
Definition: huffyuvenc.c:63
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
store_huffman_tables
static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
Definition: huffyuvenc.c:219
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
WRITE2_16
#define WRITE2_16
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:77
encode_422_bitstream
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
Definition: huffyuvenc.c:440
HYuvEncContext::stats
uint64_t stats[4][MAX_VLC_N]
Definition: huffyuvenc.c:69
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
sub_left_prediction_bgr32
static void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:119
src1
const pixel * src1
Definition: h264pred_template.c:421
HYuvEncContext::chroma_v_shift
int chroma_v_shift
Definition: huffyuvenc.c:62
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:50
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
OFFSET
#define OFFSET(x)
Definition: huffyuvenc.c:1044
encode_gray_bitstream
static int encode_gray_bitstream(HYuvEncContext *s, int count)
Definition: huffyuvenc.c:657
encode_bgra_bitstream
static int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
Definition: huffyuvenc.c:702
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:241
store_table
static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:194
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:514
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:222
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:521
w
uint8_t w
Definition: llviddspenc.c:38
ff_class
static const AVClass ff_class
Definition: huffyuvenc.c:1074
AVPacket::data
uint8_t * data
Definition: packet.h:522
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:516
AVOption
AVOption.
Definition: opt.h:346
HYuvEncContext::interlaced
int interlaced
Definition: huffyuvenc.c:51
encode.h
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:148
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
FFCodec
Definition: codec_internal.h:127
STATS_OUT_SIZE
#define STATS_OUT_SIZE
MEDIAN
@ MEDIAN
Definition: huffyuv.h:55
encode_plane_bitstream
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
Definition: huffyuvenc.c:495
WRITEEND_16
#define WRITEEND_16
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
HYuvEncContext::temp16
uint16_t * temp16[3]
identical to temp but 16bit type
Definition: huffyuvenc.c:68
HYuvEncContext::yuv
int yuv
Definition: huffyuvenc.c:60
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:517
HYuvEncContext::bitstream_bpp
int bitstream_bpp
Definition: huffyuvenc.c:53
HYuvEncContext::decorrelate
int decorrelate
Definition: huffyuvenc.c:52
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
LOADEND_14
#define LOADEND_14
STAT2_16
#define STAT2_16
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:513
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
v0
#define v0
Definition: regdef.h:26
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
sub_left_prediction_rgb24
static void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:155
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:523
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:476
HYuvEncContext::vlc_n
int vlc_n
Definition: huffyuvenc.c:57
HYuvEncContext::non_determ
int non_determ
Definition: huffyuvenc.c:75
LOAD2
#define LOAD2
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(uint8_t *temp[3], uint16_t *temp16[3], int width)
Definition: huffyuv.c:63
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:462
AV_CODEC_FLAG2_NO_OUTPUT
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:361
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:296
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:490
mask
static const uint16_t mask[17]
Definition: lzw.c:38
emms_c
#define emms_c()
Definition: emms.h:63
STATEND
#define STATEND
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
WRITE2
#define WRITE2
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer. Concatenated stuff from stats_out of pass1 should be placed here.
Definition: avcodec.h:1342
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:491
g
const char * g
Definition: vf_curves.c:127
HYuvEncContext::pb
PutBitContext pb
Definition: huffyuvenc.c:49
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
Definition: codec.h:159
STAT2
#define STAT2
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
normal_class
static const AVClass normal_class
Definition: huffyuvenc.c:1067
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:475
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:489
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
HuffYUVEncDSPContext
Definition: huffyuvencdsp.h:26
huffyuvencdsp.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:119
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(uint8_t *temp[3], uint16_t *temp16[3])
Definition: huffyuv.c:76
WRITE_GBRA
#define WRITE_GBRA
MAX_N
#define MAX_N
Definition: huffyuv.h:49
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
LOADEND_16
#define LOADEND_16
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:497
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
LOADEND
#define LOADEND
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
WRITEEND
#define WRITEEND
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:60
ff_huffyuv_encoder
const FFCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1081
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:40
HYuvEncContext::context
int context
Definition: huffyuvenc.c:64
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
HYuvEncContext::len
uint8_t len[4][MAX_VLC_N]
Definition: huffyuvenc.c:70
index
int index
Definition: gxfenc.c:89
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:1033
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:100
LOAD2_16
#define LOAD2_16
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1334
HYuvEncContext::temp
uint8_t * temp[3]
Definition: huffyuvenc.c:67
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:523
codec_internal.h
LOAD4
#define LOAD4
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
HYuvEncContext
Definition: huffyuvenc.c:46
HYuvEncContext::n
int n
Definition: huffyuvenc.c:56
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:483
size
int size
Definition: twinvq_data.h:10344
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:485
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
height
#define height
Predictor
Definition: ratecontrol.h:35
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:518
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
COMMON_OPTIONS
#define COMMON_OPTIONS
Definition: huffyuvenc.c:1047
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
VE
#define VE
Definition: huffyuvenc.c:1045
HYuvEncContext::chroma
int chroma
Definition: huffyuvenc.c:59
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
lossless_videoencdsp.h
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1567
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
normal_options
static const AVOption normal_options[]
Definition: huffyuvenc.c:1056
HYuvEncContext::alpha
int alpha
Definition: huffyuvenc.c:58
sub_left_prediction
static int sub_left_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:88
src2
const pixel * src2
Definition: h264pred_template.c:422
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
HYuvEncContext::bps
int bps
Definition: huffyuvenc.c:55
HYuvEncContext::version
int version
Definition: huffyuvenc.c:54
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HYuvEncContext::bdsp
BswapDSPContext bdsp
Definition: huffyuvenc.c:72
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:54
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
LLVidEncDSPContext
Definition: lossless_videoencdsp.h:25
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:477
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
sub_median_prediction
static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:183
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
HYuvEncContext::hencdsp
HuffYUVEncDSPContext hencdsp
Definition: huffyuvenc.c:73
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:515
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:487
HYuvEncContext::picture_number
int picture_number
Definition: huffyuvenc.c:65
AVCodecContext
main external API structure.
Definition: avcodec.h:445
LOAD_GBRA
#define LOAD_GBRA
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
huffman.h
temp
else temp
Definition: vf_mcdeint.c:263
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_options
static const AVOption ff_options[]
Definition: huffyuvenc.c:1061
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: huffyuvenc.c:752
HYuvEncContext::predictor
Predictor predictor
Definition: huffyuvenc.c:50
src0
const pixel *const src0
Definition: h264pred_template.c:420
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
LOAD2_14
#define LOAD2_14
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
ff_ffvhuff_encoder
const FFCodec ff_ffvhuff_encoder
planes
static const struct @386 planes[]
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ff_huffyuvencdsp_init
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvencdsp.c:71
d
d
Definition: ffmpeg_filter.c:409
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:389
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
HYuvEncContext::llvidencdsp
LLVidEncDSPContext llvidencdsp
Definition: huffyuvenc.c:74
BswapDSPContext
Definition: bswapdsp.h:24
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:488
huffyuv.h
diff_bytes
static void diff_bytes(HYuvEncContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:78
put_bits.h
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
HYuvEncContext::avctx
AVCodecContext * avctx
Definition: huffyuvenc.c:48
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:486
STAT_BGRA
#define STAT_BGRA
HYuvEncContext::bits
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuvenc.c:71
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310