FFmpeg
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see https://multimedia.cx/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "config_components.h"
32 
33 #include "avcodec.h"
34 #include "bswapdsp.h"
35 #include "codec_internal.h"
36 #include "encode.h"
37 #include "huffyuv.h"
38 #include "huffman.h"
39 #include "huffyuvencdsp.h"
40 #include "lossless_videoencdsp.h"
41 #include "put_bits.h"
42 #include "libavutil/opt.h"
43 #include "libavutil/pixdesc.h"
44 
typedef struct HYuvEncContext {
    AVClass *class;                // AVOptions class pointer (first member)
    int version;                   // bitstream version; set to 2 in init, 3 for planar/high-depth formats
    int bps;                       // bits per sample, from the pixel format descriptor
    int n; // 1<<bps
    int vlc_n; // number of vlc codes (FFMIN(1<<bps, MAX_VLC_N))
    int alpha;                     // nonzero if the pixel format has an alpha plane
    int chroma;                    // nonzero if the pixel format has chroma planes
    int yuv;                       // nonzero for non-RGB formats with >= 2 components
    int flags;                     // copy of avctx->flags
    int context;                   // per-frame adaptive huffman tables ("context" option)

    uint8_t *temp[3];
    uint16_t *temp16[3]; ///< identical to temp but 16bit type
    uint64_t stats[4][MAX_VLC_N];  // per-table symbol occurrence counts
    uint8_t len[4][MAX_VLC_N];     // huffman code lengths per table
    uint32_t bits[4][MAX_VLC_N];   // huffman code bits per table
    int non_determ; // non-deterministic, multi-threaded encoder allowed
    /* NOTE(review): additional members (avctx, predictor, bitstream_bpp,
     * interlaced, decorrelate, chroma_h/v_shift, picture_number, pb, dsp
     * contexts) and the closing of this struct are elided in this view. */
77 static inline void diff_bytes(HYuvEncContext *s, uint8_t *dst,
78  const uint8_t *src0, const uint8_t *src1, int w)
79 {
80  if (s->bps <= 8) {
81  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
82  } else {
83  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
84  }
85 }
86 
/**
 * Left-predict one line: dst[i] = src[i] - previous sample, with the initial
 * predictor supplied in @p left.
 *
 * The first FFMIN(w, 32) samples are handled in scalar code; the SIMD dsp
 * routine then processes the remainder starting at offset 32 (so it is never
 * called for w < 32).
 *
 * @return the last source sample value, to be carried into the next call as
 *         the new left predictor
 */
static inline int sub_left_prediction(HYuvEncContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    int min_width = FFMIN(w, 32);

    if (s->bps <= 8) {
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src[i];
            dst[i] = temp - left;
            left = temp;
        }
        if (w < 32)
            return left;
        s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
        return src[w-1];
    } else {
        const uint16_t *src16 = (const uint16_t *)src;
        uint16_t       *dst16 = (      uint16_t *)dst;
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src16[i];
            dst16[i] = temp - left;
            left = temp;
        }
        if (w < 32)
            return left;
        /* s->n - 1 masks differences to the configured bit depth */
        s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
        return src16[w-1];
    }
}
117 
/**
 * Left-predict one line of packed 32-bit RGBA: each channel is differenced
 * against the same channel of the previous pixel. The per-channel initial
 * predictors are read from and the final sample values written back to
 * *red/*green/*blue/*alpha so they carry across calls.
 *
 * The first FFMIN(w, 8) pixels (= 32 bytes) are handled in scalar code; the
 * dsp routine processes the rest with a 4-byte (one pixel) lag.
 * NOTE(review): for w < 8 the dsp call receives a non-positive byte count —
 * assumes diff_bytes tolerates that as a no-op; TODO confirm.
 */
static inline void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    int min_width = FFMIN(w, 8);
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < min_width; i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);

    /* export the last pixel's channels as the next line's predictors */
    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}
153 
/**
 * Left-predict one line of packed 24-bit RGB, analogous to the bgr32 variant:
 * first FFMIN(w, 16) pixels (= 48 bytes) in scalar code, remainder via the
 * dsp routine with a 3-byte (one pixel) lag. Predictors are carried in/out
 * through *red/*green/*blue.
 * NOTE(review): for w < 16 the dsp call receives a non-positive byte count —
 * assumes diff_bytes tolerates that as a no-op; TODO confirm.
 */
static inline void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    /* export the last pixel's channels as the next line's predictors */
    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}
181 
182 static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst,
183  const uint8_t *src1, const uint8_t *src2,
184  int w, int *left, int *left_top)
185 {
186  if (s->bps <= 8) {
187  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
188  } else {
189  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
190  }
191 }
192 
193 static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
194 {
195  int i;
196  int index = 0;
197  int n = s->vlc_n;
198 
199  for (i = 0; i < n;) {
200  int val = len[i];
201  int repeat = 0;
202 
203  for (; i < n && len[i] == val && repeat < 255; i++)
204  repeat++;
205 
206  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
207  if (repeat > 7) {
208  buf[index++] = val;
209  buf[index++] = repeat;
210  } else {
211  buf[index++] = val | (repeat << 5);
212  }
213  }
214 
215  return index;
216 }
217 
/**
 * Generate code lengths and bits from the collected symbol statistics and
 * serialize the length tables (run-length coded via store_table()) into buf.
 *
 * Version <= 2 bitstreams always carry 3 tables; version 3 carries one per
 * coded plane: luma + optional alpha + 2 chroma tables when chroma exists.
 *
 * @return total bytes written on success, a negative value on failure
 */
static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
{
    int i, ret;
    int size = 0;
    int count = 3;

    if (s->version > 2)
        count = 1 + s->alpha + 2*s->chroma;

    for (i = 0; i < count; i++) {
        /* derive optimal code lengths from the occurrence counts */
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
            return ret;

        /* build the actual code bits from the lengths */
        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
            return -1;
        }

        size += store_table(s, s->len[i], buf + size);
    }
    return size;
}
239 
/* Encoder init callback.
 * NOTE(review): the function signature line is elided in this view, as are
 * several pixel-format case labels inside the switch below. */
{
    HYuvEncContext *s = avctx->priv_data;
    int i, j;
    int ret;
    const AVPixFmtDescriptor *desc;

    s->avctx = avctx;
    s->flags = avctx->flags;

    ff_bswapdsp_init(&s->bdsp);
    ff_huffyuvencdsp_init(&s->hencdsp, avctx->pix_fmt);
    ff_llvidencdsp_init(&s->llvidencdsp);

    /* 4 header bytes + up to 3 run-length coded length tables */
    avctx->extradata = av_mallocz(3*MAX_N + 4);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    if (s->flags&AV_CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
    }
    s->version = 2;

    /* derive layout parameters from the pixel format descriptor */
    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    s->bps = desc->comp[0].depth;
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
    s->chroma = desc->nb_components > 2;
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    s->chroma_h_shift = desc->log2_chroma_w;
    s->chroma_v_shift = desc->log2_chroma_h;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (avctx->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    /* all remaining planar/gray/alpha/high-depth formats use version 3 */
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_YUV420P9:
    /* NOTE(review): further high-bit-depth case labels are elided here */
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
        s->version = 3;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    s->n = 1<<s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    /* RGB decorrelation only applies to packed (non-planar) RGB */
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (s->context) {
        /* adaptive tables change mid-stream and would corrupt 2-pass stats */
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    }

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (s->interlaced != ( avctx->height > 288 ))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
        return AVERROR(EINVAL);
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    /* write the 4-byte extradata header */
    avctx->extradata[0] = s->predictor | (s->decorrelate << 6);
    avctx->extradata[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        avctx->extradata[2] |= 0x40;
    if (s->version < 3) {
        avctx->extradata[1] = s->bitstream_bpp;
        avctx->extradata[3] = 0;
    } else {
        avctx->extradata[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
        if (s->chroma)
            avctx->extradata[2] |= s->yuv ? 1 : 2;
        if (s->alpha)
            avctx->extradata[2] |= 4;
        avctx->extradata[3] = 1;
    }
    avctx->extradata_size = 4;

    if (avctx->stats_in) {
        /* 2-pass: seed the statistics from the pass-1 stats string */
        char *p = avctx->stats_in;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 4; i++) {
                char *next;

                for (j = 0; j < s->vlc_n; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        /* no stats: synthesize a plausible peaked distribution */
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = 100000000 / (d*d + 1);
            }
    }

    ret = store_huffman_tables(s, avctx->extradata + avctx->extradata_size);
    if (ret < 0)
        return ret;
    avctx->extradata_size += ret;

    if (s->context) {
        /* seed per-frame adaptive stats proportionally to the frame size */
        for (i = 0; i < 4; i++) {
            int pels = avctx->width * avctx->height / (i ? 40 : 10);
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);
                s->stats[i][j] = pels/(d*d + 1);
            }
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j]= 0;
    }

    ret = ff_huffyuv_alloc_temp(s->temp, s->temp16, avctx->width);
    if (ret < 0)
        return ret;

    s->picture_number=0;

    return 0;
}
/**
 * Entropy-code one 4:2:2 line from s->temp[0..2] (luma in temp[0], chroma in
 * temp[1]/temp[2] at half width). @p count is the number of luma samples.
 *
 * Depending on flags the pass may only gather statistics (PASS1 +
 * NO_OUTPUT), gather while writing (context=1 or PASS1), or only write.
 *
 * @return 0 on success, -1 if the packet buffer cannot hold the worst case
 */
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    /* conservative worst-case space check before writing anything */
    if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch one 2-luma + 1-chroma-pair group at index i */
#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        /* adaptive mode: update stats while emitting codes */
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
493 
/**
 * Entropy-code one line of a single plane from s->temp[0]/s->temp16[0]
 * (version-3 bitstream). Three code paths by bit depth:
 *  - <= 8 bps: bytes, coded directly
 *  - <= 14 bps: 16-bit samples masked to bps bits, coded directly
 *  - > 14 bps: the top (bps-2) bits are huffman coded and the low 2 bits
 *    are stored raw (the *_16 macros)
 * Samples are processed in pairs; an odd trailing sample is handled by the
 * *END macros. Stats gathering / writing is gated by PASS1, NO_OUTPUT and
 * context, exactly as in encode_422_bitstream().
 */
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
{
    int i, count = width/2;

    /* worst-case space check (bps bits per sample) */
    if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* handlers for the final sample of an odd-width line */
#define LOADEND\
    int y0 = s->temp[0][width-1];
#define LOADEND_14\
    int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
    int y0 = s->temp16[0][width-1];
#define STATEND\
    s->stats[plane][y0]++;
#define STATEND_16\
    s->stats[plane][y0>>2]++;
#define WRITEEND\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);

/* handlers for one pair of samples at index i */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
    int y0 = s->temp16[0][2 * i] & mask;\
    int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
    int y0 = s->temp16[0][2 * i];\
    int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
    s->stats[plane][y0]++;\
    s->stats[plane][y1]++;
#define STAT2_16\
    s->stats[plane][y0>>2]++;\
    s->stats[plane][y1>>2]++;
#define WRITE2\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
    put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);\
    put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
    put_bits(&s->pb, 2, y1&3);

    if (s->bps <= 8) {
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                WRITEEND;
            }
        }
    } else if (s->bps <= 14) {
        int mask = s->n - 1;  /* clamp samples to bps bits */
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                WRITEEND;
            }
        }
    } else {
        /* > 14 bps: huffman-code the top bits, raw-store the low 2 bits */
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
                WRITEEND_16;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                WRITEEND_16;
            }
        }
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}
655 
/**
 * Entropy-code one grayscale line from s->temp[0] using table 0, processing
 * samples in pairs. Stats/write gating mirrors encode_422_bitstream().
 *
 * @return 0 on success, -1 if the packet buffer cannot hold the worst case
 */
static int encode_gray_bitstream(HYuvEncContext *s, int count)
{
    int i;

    /* conservative worst-case space check */
    if (put_bytes_left(&s->pb, 0) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive mode: update stats while emitting codes */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
700 
/**
 * Entropy-code one line of packed RGB(A) from s->temp[0]. @p planes is 3
 * (RGB24) or 4 (RGBA). Red and blue are decorrelated against green before
 * coding; green uses table 1, blue table 0, red table 2, and alpha shares
 * table 2 with red (matching the bitstream layout).
 *
 * @return 0 on success, -1 if the packet buffer cannot hold the worst case
 */
static inline int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
{
    int i;

    /* conservative worst-case space check */
    if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* load one pixel, decorrelating r and b against g */
#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        /* stats-only pass */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        /* gather stats and write */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
750 
/* Per-frame encode callback.
 * NOTE(review): the first signature line and several statement lines (the
 * per-line encode_*_bitstream calls, the context-mode store_huffman_tables
 * call) are elided in this extraction; the visible flow below is: predict
 * each line into s->temp[] (left / plane / median predictor), entropy-code
 * it, then finalize the packet and optionally dump pass-1 stats. */
                        const AVFrame *pict, int *got_packet)
{
    HYuvEncContext *s = avctx->priv_data;
    const int width = avctx->width;
    const int width2 = avctx->width >> 1;
    const int height = avctx->height;
    /* with interlacing, predict against the line two rows up */
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0)
        return ret;

    if (s->context) {
        /* NOTE(review): the line storing the per-frame tables into the
         * packet (setting `size`) is elided here */
        if (size < 0)
            return size;

        /* decay the adaptive statistics */
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* first 4 samples are stored raw in the header */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);


        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                /* second field's first line is also left-predicted */
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                y++; cy++;
            }

            /* first few samples of the next line: left prediction, then
             * switch to median for the rest */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                const uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    /* 4:2:0: emit luma-only lines until chroma catches up */
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                const uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    /* plane predictor: vertical diff first, then left-predict */
                    s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* packed RGBA is encoded bottom-up */
        const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (int y = 1; y < height; y++) {
            const uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        /* packed RGB24, also bottom-up */
        const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);

        for (int y = 1; y < height; y++) {
            const uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                          width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
        }
    } else if (s->version > 2) {
        /* version-3 planar path: encode each plane independently */
        int plane;
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
            int left, y;
            int w = width;
            int h = height;
            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {
                w >>= s->chroma_h_shift;
                h >>= s->chroma_v_shift;
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
            }

            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);

            encode_plane_bitstream(s, w, plane);

            if (s->predictor==MEDIAN) {
                int lefttop;
                y = 1;
                if (s->interlaced) {
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);

                    encode_plane_bitstream(s, w, plane);
                    y++;
                }

                lefttop = p->data[plane][0];

                for (; y < h; y++) {
                    const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);

                    encode_plane_bitstream(s, w, plane);
                }
            } else {
                for (y = 1; y < h; y++) {
                    const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);

                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
                    } else {
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
                    }

                    encode_plane_bitstream(s, w, plane);
                }
            }
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* pad to a 32-bit boundary and convert to a word count */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        /* dump accumulated statistics every 32 frames for 2-pass mode */
        int j;
        char *p = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
            if (end <= p)
                return AVERROR(ENOMEM);
        }
    } else if (avctx->stats_out)
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        /* bitstream is stored as byte-swapped 32-bit words */
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size = size * 4;
    *got_packet = 1;

    return 0;
}
1031 
/* Encoder close callback: releases the temp line buffers and the pass-1
 * stats string. NOTE(review): the signature line is elided in this view. */
{
    HYuvEncContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s->temp, s->temp16);

    av_freep(&avctx->stats_out);

    return 0;
}
1042 
/* AVOption plumbing: field offset into the private context + common flags */
#define OFFSET(x) offsetof(HYuvEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Options shared by the huffyuv and ffvhuff encoders */
#define COMMON_OPTIONS \
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 }, \
      0, 1, VE }, \
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
    { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
    { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \

/* Option table for the huffyuv encoder.
 * NOTE(review): the COMMON_OPTIONS entry appears to be elided in this view. */
static const AVOption normal_options[] = {
    { NULL },
};
1059 
/* Option table for the ffvhuff encoder: common options plus "context".
 * NOTE(review): the COMMON_OPTIONS entry appears to be elided in this view. */
static const AVOption ff_options[] = {
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { NULL },
};
1065 
/* AVClass for the huffyuv encoder's private options */
static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name  = av_default_item_name,
    .option     = normal_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
1072 
/* AVClass for the ffvhuff encoder's private options */
static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name  = av_default_item_name,
    .option     = ff_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
1079 
    /* huffyuv encoder registration.
     * NOTE(review): the opening `const FFCodec ff_huffyuv_encoder = {` line,
     * part of the capabilities expression, the encode callback initializer
     * and the pixel format list are elided in this view. */
    .p.name         = "huffyuv",
    CODEC_LONG_NAME("Huffyuv / HuffYUV"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_HUFFYUV,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
    .priv_data_size = sizeof(HYuvEncContext),
    .init           = encode_init,
    .close          = encode_end,
    .p.priv_class   = &normal_class,
    .p.pix_fmts     = (const enum AVPixelFormat[]){
    },
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};
1098 
#if CONFIG_FFVHUFF_ENCODER
/* ffvhuff (FFmpeg huffyuv variant) encoder registration.
 * NOTE(review): part of the capabilities expression, the encode callback
 * initializer and the pixel format list are elided in this view. */
const FFCodec ff_ffvhuff_encoder = {
    .p.name         = "ffvhuff",
    CODEC_LONG_NAME("Huffyuv FFmpeg variant"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_FFVHUFF,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
    .priv_data_size = sizeof(HYuvEncContext),
    .init           = encode_init,
    .close          = encode_end,
    .p.priv_class   = &ff_class,
    .p.pix_fmts     = (const enum AVPixelFormat[]){
    },
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
HYuvEncContext::chroma_h_shift
int chroma_h_shift
Definition: huffyuvenc.c:60
STATEND_16
#define STATEND_16
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:502
A
#define A(x)
Definition: vpx_arith.h:28
bswapdsp.h
HYuvEncContext::flags
int flags
Definition: huffyuvenc.c:62
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
planes
static const struct @346 planes[]
store_huffman_tables
static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
Definition: huffyuvenc.c:218
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
WRITE2_16
#define WRITE2_16
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:77
encode_422_bitstream
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
Definition: huffyuvenc.c:439
HYuvEncContext::stats
uint64_t stats[4][MAX_VLC_N]
Definition: huffyuvenc.c:68
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
sub_left_prediction_bgr32
static void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:118
src1
const pixel * src1
Definition: h264pred_template.c:421
HYuvEncContext::chroma_v_shift
int chroma_v_shift
Definition: huffyuvenc.c:61
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:50
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
OFFSET
#define OFFSET(x)
Definition: huffyuvenc.c:1043
encode_gray_bitstream
static int encode_gray_bitstream(HYuvEncContext *s, int count)
Definition: huffyuvenc.c:656
encode_bgra_bitstream
static int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
Definition: huffyuvenc.c:701
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:240
store_table
static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:193
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:494
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:501
w
uint8_t w
Definition: llviddspenc.c:38
ff_class
static const AVClass ff_class
Definition: huffyuvenc.c:1073
AVPacket::data
uint8_t * data
Definition: packet.h:374
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:496
AVOption
AVOption.
Definition: opt.h:251
HYuvEncContext::interlaced
int interlaced
Definition: huffyuvenc.c:50
encode.h
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:146
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:459
FFCodec
Definition: codec_internal.h:127
STATS_OUT_SIZE
#define STATS_OUT_SIZE
MEDIAN
@ MEDIAN
Definition: huffyuv.h:55
encode_plane_bitstream
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
Definition: huffyuvenc.c:494
WRITEEND_16
#define WRITEEND_16
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
HYuvEncContext::temp16
uint16_t * temp16[3]
identical to temp but 16bit type
Definition: huffyuvenc.c:67
HYuvEncContext::yuv
int yuv
Definition: huffyuvenc.c:59
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:330
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:497
HYuvEncContext::bitstream_bpp
int bitstream_bpp
Definition: huffyuvenc.c:52
HYuvEncContext::decorrelate
int decorrelate
Definition: huffyuvenc.c:51
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
LOADEND_14
#define LOADEND_14
STAT2_16
#define STAT2_16
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:493
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:477
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
v0
#define v0
Definition: regdef.h:26
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:435
sub_left_prediction_rgb24
static void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:154
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:475
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:503
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:457
HYuvEncContext::vlc_n
int vlc_n
Definition: huffyuvenc.c:56
HYuvEncContext::non_determ
int non_determ
Definition: huffyuvenc.c:74
LOAD2
#define LOAD2
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(uint8_t *temp[3], uint16_t *temp16[3], int width)
Definition: huffyuv.c:63
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:443
AV_CODEC_FLAG2_NO_OUTPUT
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:340
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:315
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:462
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:471
mask
static const uint16_t mask[17]
Definition: lzw.c:38
STATEND
#define STATEND
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
WRITE2
#define WRITE2
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:256
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1304
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:472
g
const char * g
Definition: vf_curves.c:127
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
HYuvEncContext::pb
PutBitContext pb
Definition: huffyuvenc.c:48
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:156
STAT2
#define STAT2
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
B
#define B
Definition: huffyuv.h:42
normal_class
static const AVClass normal_class
Definition: huffyuvenc.c:1066
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:456
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:470
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
HuffYUVEncDSPContext
Definition: huffyuvencdsp.h:26
AV_INPUT_BUFFER_MIN_SIZE
#define AV_INPUT_BUFFER_MIN_SIZE
Definition: avcodec.h:191
huffyuvencdsp.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:119
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(uint8_t *temp[3], uint16_t *temp16[3])
Definition: huffyuv.c:76
WRITE_GBRA
#define WRITE_GBRA
MAX_N
#define MAX_N
Definition: huffyuv.h:49
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
LOADEND_16
#define LOADEND_16
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:107
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:478
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
LOADEND
#define LOADEND
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
WRITEEND
#define WRITEEND
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:60
ff_huffyuv_encoder
const FFCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1080
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:40
HYuvEncContext::context
int context
Definition: huffyuvenc.c:63
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:460
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:474
HYuvEncContext::len
uint8_t len[4][MAX_VLC_N]
Definition: huffyuvenc.c:69
index
int index
Definition: gxfenc.c:89
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:1032
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:91
LOAD2_16
#define LOAD2_16
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1296
HYuvEncContext::temp
uint8_t * temp[3]
Definition: huffyuvenc.c:66
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
codec_internal.h
LOAD4
#define LOAD4
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
HYuvEncContext
Definition: huffyuvenc.c:45
HYuvEncContext::n
int n
Definition: huffyuvenc.c:55
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:464
size
int size
Definition: twinvq_data.h:10344
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:466
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:293
height
#define height
Predictor
Definition: ratecontrol.h:35
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:432
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:498
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
COMMON_OPTIONS
#define COMMON_OPTIONS
Definition: huffyuvenc.c:1046
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
VE
#define VE
Definition: huffyuvenc.c:1044
HYuvEncContext::chroma
int chroma
Definition: huffyuvenc.c:58
AVCodec::id
enum AVCodecID id
Definition: codec.h:198
lossless_videoencdsp.h
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1480
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
normal_options
static const AVOption normal_options[]
Definition: huffyuvenc.c:1055
HYuvEncContext::alpha
int alpha
Definition: huffyuvenc.c:57
sub_left_prediction
static int sub_left_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:87
src2
const pixel * src2
Definition: h264pred_template.c:422
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:476
HYuvEncContext::bps
int bps
Definition: huffyuvenc.c:54
HYuvEncContext::version
int version
Definition: huffyuvenc.c:53
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HYuvEncContext::bdsp
BswapDSPContext bdsp
Definition: huffyuvenc.c:71
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:54
AVCodecContext::height
int height
Definition: avcodec.h:598
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
LLVidEncDSPContext
Definition: lossless_videoencdsp.h:25
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:458
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
sub_median_prediction
static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:182
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1341
HYuvEncContext::hencdsp
HuffYUVEncDSPContext hencdsp
Definition: huffyuvenc.c:72
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:495
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:463
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:468
HYuvEncContext::picture_number
int picture_number
Definition: huffyuvenc.c:64
AVCodecContext
main external API structure.
Definition: avcodec.h:426
LOAD_GBRA
#define LOAD_GBRA
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
huffman.h
temp
else temp
Definition: vf_mcdeint.c:248
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_options
static const AVOption ff_options[]
Definition: huffyuvenc.c:1060
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: huffyuvenc.c:751
HYuvEncContext::predictor
Predictor predictor
Definition: huffyuvenc.c:49
src0
const pixel *const src0
Definition: h264pred_template.c:420
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
LOAD2_14
#define LOAD2_14
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
ff_ffvhuff_encoder
const FFCodec ff_ffvhuff_encoder
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
ff_huffyuvencdsp_init
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvencdsp.c:71
d
d
Definition: ffmpeg_filter.c:156
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
HYuvEncContext::llvidencdsp
LLVidEncDSPContext llvidencdsp
Definition: huffyuvenc.c:73
BswapDSPContext
Definition: bswapdsp.h:24
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:469
huffyuv.h
diff_bytes
static void diff_bytes(HYuvEncContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:77
put_bits.h
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:35
HYuvEncContext::avctx
AVCodecContext * avctx
Definition: huffyuvenc.c:47
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:467
STAT_BGRA
#define STAT_BGRA
HYuvEncContext::bits
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuvenc.c:70
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:289