FFmpeg
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see https://multimedia.cx/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "config_components.h"
32 
33 #include "avcodec.h"
34 #include "bswapdsp.h"
35 #include "codec_internal.h"
36 #include "encode.h"
37 #include "huffyuv.h"
38 #include "huffman.h"
39 #include "huffyuvencdsp.h"
40 #include "lossless_videoencdsp.h"
41 #include "put_bits.h"
42 #include "libavutil/emms.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/pixdesc.h"
46 
/* Encoder state.  NOTE(review): several fields (avctx, predictor,
 * bitstream_bpp, interlaced, decorrelate, chroma_h/v_shift, picture_number,
 * PutBitContext pb, DSP contexts, and the closing brace) are elided in this
 * rendering — confirm against the full source. */
typedef struct HYuvEncContext {
    AVClass *class;
    int version;               // stream version: 2 for classic huffyuv, 3 for extended formats
    int bps;                   // bits per sample, from the pixel format descriptor
    int n; // 1<<bps
    int vlc_n; // number of vlc codes (FFMIN(1<<bps, MAX_VLC_N))
    int alpha;                 // nonzero if the pixel format has an alpha plane
    int chroma;                // nonzero if the format has more than 2 components
    int yuv;                   // nonzero for non-RGB formats with >=2 components
    int flags;                 // copy of avctx->flags
    int context;               // per-frame adaptive huffman tables ("context" option)

    union {
        uint8_t *temp[3];      // per-row scratch buffers (byte view, <=8 bpp)
        uint16_t *temp16[3];   // same storage viewed as 16-bit samples (>8 bpp)
    };
    uint64_t stats[4][MAX_VLC_N]; // symbol occurrence counts per plane class
    uint8_t len[4][MAX_VLC_N];    // huffman code lengths per plane class
    uint32_t bits[4][MAX_VLC_N];  // huffman code bits per plane class
    int non_determ; // non-deterministic, multi-threaded encoder allowed
80 
81 static inline void diff_bytes(HYuvEncContext *s, uint8_t *dst,
82  const uint8_t *src0, const uint8_t *src1, int w)
83 {
84  if (s->bps <= 8) {
85  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
86  } else {
87  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
88  }
89 }
90 
91 static inline int sub_left_prediction(HYuvEncContext *s, uint8_t *dst,
92  const uint8_t *src, int w, int left)
93 {
94  int i;
95  int min_width = FFMIN(w, 32);
96 
97  if (s->bps <= 8) {
98  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
99  const int temp = src[i];
100  dst[i] = temp - left;
101  left = temp;
102  }
103  if (w < 32)
104  return left;
105  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
106  return src[w-1];
107  } else {
108  const uint16_t *src16 = (const uint16_t *)src;
109  uint16_t *dst16 = ( uint16_t *)dst;
110  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
111  const int temp = src16[i];
112  dst16[i] = temp - left;
113  left = temp;
114  }
115  if (w < 32)
116  return left;
117  s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
118  return src16[w-1];
119  }
120 }
121 
122 static inline void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst,
123  const uint8_t *src, int w,
124  int *red, int *green, int *blue,
125  int *alpha)
126 {
127  int i;
128  int r, g, b, a;
129  int min_width = FFMIN(w, 8);
130  r = *red;
131  g = *green;
132  b = *blue;
133  a = *alpha;
134 
135  for (i = 0; i < min_width; i++) {
136  const int rt = src[i * 4 + R];
137  const int gt = src[i * 4 + G];
138  const int bt = src[i * 4 + B];
139  const int at = src[i * 4 + A];
140  dst[i * 4 + R] = rt - r;
141  dst[i * 4 + G] = gt - g;
142  dst[i * 4 + B] = bt - b;
143  dst[i * 4 + A] = at - a;
144  r = rt;
145  g = gt;
146  b = bt;
147  a = at;
148  }
149 
150  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
151 
152  *red = src[(w - 1) * 4 + R];
153  *green = src[(w - 1) * 4 + G];
154  *blue = src[(w - 1) * 4 + B];
155  *alpha = src[(w - 1) * 4 + A];
156 }
157 
158 static inline void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst,
159  const uint8_t *src, int w,
160  int *red, int *green, int *blue)
161 {
162  int i;
163  int r, g, b;
164  r = *red;
165  g = *green;
166  b = *blue;
167  for (i = 0; i < FFMIN(w, 16); i++) {
168  const int rt = src[i * 3 + 0];
169  const int gt = src[i * 3 + 1];
170  const int bt = src[i * 3 + 2];
171  dst[i * 3 + 0] = rt - r;
172  dst[i * 3 + 1] = gt - g;
173  dst[i * 3 + 2] = bt - b;
174  r = rt;
175  g = gt;
176  b = bt;
177  }
178 
179  s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
180 
181  *red = src[(w - 1) * 3 + 0];
182  *green = src[(w - 1) * 3 + 1];
183  *blue = src[(w - 1) * 3 + 2];
184 }
185 
186 static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst,
187  const uint8_t *src1, const uint8_t *src2,
188  int w, int *left, int *left_top)
189 {
190  if (s->bps <= 8) {
191  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
192  } else {
193  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
194  }
195 }
196 
197 static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
198 {
199  int i;
200  int index = 0;
201  int n = s->vlc_n;
202 
203  for (i = 0; i < n;) {
204  int val = len[i];
205  int repeat = 0;
206 
207  for (; i < n && len[i] == val && repeat < 255; i++)
208  repeat++;
209 
210  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
211  if (repeat > 7) {
212  buf[index++] = val;
213  buf[index++] = repeat;
214  } else {
215  buf[index++] = val | (repeat << 5);
216  }
217  }
218 
219  return index;
220 }
221 
222 static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
223 {
224  int i, ret;
225  int size = 0;
226  int count = 3;
227 
228  if (s->version > 2)
229  count = 1 + s->alpha + 2*s->chroma;
230 
231  for (i = 0; i < count; i++) {
232  if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
233  return ret;
234 
235  ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n);
236  if (ret < 0)
237  return ret;
238 
239  size += store_table(s, s->len[i], buf + size);
240  }
241  return size;
242 }
243 
/* Encoder init (the signature line is elided in this rendering; registered
 * below as encode_init).  Sets up DSP contexts, derives layout parameters
 * from the pixel format, writes the 4-byte global header plus serialized
 * Huffman tables into extradata, seeds the symbol statistics and allocates
 * the per-row temp buffers. */
{
    HYuvEncContext *s = avctx->priv_data;
    int i, j;
    int ret;
    const AVPixFmtDescriptor *desc;

    s->avctx = avctx;
    s->flags = avctx->flags;

    ff_bswapdsp_init(&s->bdsp);
    ff_huffyuvencdsp_init(&s->hencdsp, avctx->pix_fmt);
    ff_llvidencdsp_init(&s->llvidencdsp);

    /* 4 header bytes + worst-case serialized tables */
    avctx->extradata = av_mallocz(3*MAX_N + 4);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    if (s->flags&AV_CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
    }
    s->version = 2;

    /* derive sample depth and plane layout from the pixel format */
    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    s->bps = desc->comp[0].depth;
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
    s->chroma = desc->nb_components > 2;
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    s->chroma_h_shift = desc->log2_chroma_w;
    s->chroma_v_shift = desc->log2_chroma_h;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (avctx->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    /* all remaining planar/gray/alpha/high-depth formats use stream
     * version 3.  NOTE(review): additional case labels appear elided in
     * this rendering — confirm against the full source. */
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
        s->version = 3;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    s->n = 1<<s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    /* RGB decorrelation only applies to packed (non-planar) RGB */
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (s->context) {
        /* adaptive tables change per frame, so 2-pass stats are meaningless */
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    }

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (s->interlaced != ( avctx->height > 288 ))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
        return AVERROR(EINVAL);
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    /* 4-byte global header: method, bpp/depth, flags, version marker */
    avctx->extradata[0] = s->predictor | (s->decorrelate << 6);
    avctx->extradata[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        avctx->extradata[2] |= 0x40;
    if (s->version < 3) {
        avctx->extradata[1] = s->bitstream_bpp;
        avctx->extradata[3] = 0;
    } else {
        avctx->extradata[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
        if (s->chroma)
            avctx->extradata[2] |= s->yuv ? 1 : 2;
        if (s->alpha)
            avctx->extradata[2] |= 4;
        avctx->extradata[3] = 1;
    }
    avctx->extradata_size = 4;

    if (avctx->stats_in) {
        /* 2nd pass: accumulate statistics parsed from the stats file */
        char *p = avctx->stats_in;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 4; i++) {
                char *next;

                for (j = 0; j < s->vlc_n; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        /* no stats: seed with a symmetric prior favouring small residuals */
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = 100000000 / (d*d + 1);
            }
    }

    ret = store_huffman_tables(s, avctx->extradata + avctx->extradata_size);
    if (ret < 0)
        return ret;
    avctx->extradata_size += ret;

    if (s->context) {
        /* re-seed stats scaled to the frame size for adaptive mode */
        for (i = 0; i < 4; i++) {
            int pels = avctx->width * avctx->height / (i ? 40 : 10);
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);
                s->stats[i][j] = pels/(d*d + 1);
            }
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j]= 0;
    }

    s->picture_number=0;

    /* per-row scratch, sized for the widest case (4 bytes/pixel) */
    for (int i = 0; i < 3; i++) {
        s->temp[i] = av_malloc(4 * avctx->width + 16);
        if (!s->temp[i])
            return AVERROR(ENOMEM);
    }

    return 0;
}
/* Entropy-code one run of 4:2:2 residuals from s->temp[0..2].
 * offset is the starting luma sample index; count is the number of luma
 * samples (halved internally: each iteration emits Y0,U,Y1,V).
 * Returns 0 on success, -1 if the packet buffer would overflow. */
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2; /* chroma is half-width */
    const uint8_t *v = s->temp[2] + offset / 2;

    /* conservative worst case for the remaining output space */
    if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    /* stats-gathering pass (pass 1 of 2-pass encoding) */
    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        /* adaptive mode: keep updating stats while writing */
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
499 
/* Entropy-code one plane row of residuals from s->temp[0]/s->temp16[0]
 * (version-3 path).  Samples are processed in pairs; an odd trailing
 * sample is handled by the *END macros.  For bps > 8, codes cover the
 * top bits and the low 2 bits are written raw (the _16 macros).
 * Returns 0 on success, -1 if the packet buffer would overflow. */
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
{
    int count = width/2;

    if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* single trailing sample, per depth */
#define LOADEND\
    int y0 = s->temp[0][width-1];
#define LOADEND_14\
    int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
    int y0 = s->temp16[0][width-1];
#define STATEND\
    s->stats[plane][y0]++;
#define STATEND_16\
    s->stats[plane][y0>>2]++;
#define WRITEEND\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);

/* sample pairs, per depth */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
    int y0 = s->temp16[0][2 * i] & mask;\
    int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
    int y0 = s->temp16[0][2 * i];\
    int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
    s->stats[plane][y0]++;\
    s->stats[plane][y1]++;
#define STAT2_16\
    s->stats[plane][y0>>2]++;\
    s->stats[plane][y1>>2]++;
#define WRITE2\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
    put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);\
    put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
    put_bits(&s->pb, 2, y1&3);

/* driver: stats pass, then context/no-context write pass, mirroring
 * encode_422_bitstream's structure */
#define ENCODE_PLANE(LOAD, LOADEND, WRITE, WRITEEND, STAT, STATEND) \
do { \
    if (s->flags & AV_CODEC_FLAG_PASS1) { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            STAT; \
        } \
        if (width & 1) { \
            LOADEND; \
            STATEND; \
        } \
    } \
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) \
        return 0; \
    \
    if (s->context) { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            STAT; \
            WRITE; \
        } \
        if (width & 1) { \
            LOADEND; \
            STATEND; \
            WRITEEND; \
        } \
    } else { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            WRITE; \
        } \
        if (width & 1) { \
            LOADEND; \
            WRITEEND; \
        } \
    } \
} while (0)

    /* NOTE(review): the ENCODE_PLANE(...) invocations for the three depth
     * branches appear elided in this rendering — confirm against the full
     * source. */
    if (s->bps <= 8) {
    } else if (s->bps <= 14) {
        int mask = s->n - 1;

    } else {
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}
601 
/* Entropy-code one row of grayscale residuals from s->temp[0], two
 * samples per iteration, all through stats/len/bits table 0.
 * Returns 0 on success, -1 if the packet buffer would overflow. */
static int encode_gray_bitstream(HYuvEncContext *s, int count)
{
    int i;

    if (put_bytes_left(&s->pb, 0) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* re-defined locally; the previous definitions were #undef'd at the end
 * of encode_plane_bitstream */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    /* stats-gathering pass (pass 1 of 2-pass encoding) */
    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive mode: keep updating stats while writing */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
646 
/* Entropy-code packed RGB(A) residuals from s->temp[0].
 * planes is 3 (RGB24) or 4 (RGB32).  Green is coded as-is; red and blue
 * are coded as differences from green (modulo 256).  Alpha shares
 * stats/len/bits table 2 with red.
 * Returns 0 on success, -1 if the packet buffer would overflow. */
static inline int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
{
    int i;

    if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* 'a' is only meaningful (and only used) when planes == 4 */
#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        /* stats only, no bitstream output */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        /* stats and output */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
696 
/* Frame encoding entry point (the first signature line is elided in this
 * rendering).  Predicts each row per the configured predictor, entropy-codes
 * the residuals, and byte-swaps the output to little-endian 32-bit words.
 * NOTE(review): several encode_*_bitstream()/store call lines appear elided
 * below — confirm against the full source. */
                        const AVFrame *p, int *got_packet)
{
    HYuvEncContext *s = avctx->priv_data;
    const int width = avctx->width;
    const int width2 = avctx->width >> 1;  /* half-width for 4:2:x chroma */
    const int height = avctx->height;
    /* stride doubled when interlaced so prediction stays within a field */
    const int fake_ystride = (1 + s->interlaced) * p->linesize[0];
    const int fake_ustride = (1 + s->interlaced) * p->linesize[1];
    const int fake_vstride = (1 + s->interlaced) * p->linesize[2];
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
        return ret;

    if (s->context) {
        /* adaptive mode: tables are (re)stored per frame; stats decay by half
         * (the store call appears elided in this rendering) */
        if (size < 0)
            return size;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* first four samples are stored raw and seed the left predictors */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);


        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                /* second field's first line is also left-predicted */
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                y++; cy++;
            }

            /* first few samples of the next row are left-predicted before
             * median prediction takes over */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                const uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    /* 4:2:0: encode luma-only lines until chroma catches up */
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

            }
        } else {
            /* LEFT or PLANE predictor */
            for (cy = y = 1; y < height; y++, cy++) {
                const uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    /* PLANE: vertical diff first, then left prediction;
                     * u and v residuals share s->temp[2] halves */
                    s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* packed RGBA is encoded bottom-up */
        const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (int y = 1; y < height; y++) {
            const uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        /* packed RGB is encoded bottom-up; a zero pad byte completes the
         * 4-byte raw header */
        const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);

        for (int y = 1; y < height; y++) {
            const uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                          width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
        }
    } else if (s->version > 2) {
        /* version-3 path: each plane is predicted and coded independently */
        int plane;
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
            int left, y;
            int w = width;
            int h = height;
            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {
                w >>= s->chroma_h_shift;
                h >>= s->chroma_v_shift;
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
            }

            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);

            encode_plane_bitstream(s, w, plane);

            if (s->predictor==MEDIAN) {
                int lefttop;
                y = 1;
                if (s->interlaced) {
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);

                    encode_plane_bitstream(s, w, plane);
                    y++;
                }

                lefttop = p->data[plane][0];

                for (; y < h; y++) {
                    const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);

                    encode_plane_bitstream(s, w, plane);
                }
            } else {
                for (y = 1; y < h; y++) {
                    const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);

                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
                    } else {
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
                    }

                    encode_plane_bitstream(s, w, plane);
                }
            }
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* round bit count up to whole 32-bit words; flush with zero padding */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        /* dump accumulated stats every 32 frames for the 2-pass log */
        int j;
        char *p = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
            if (end <= p)
                return AVERROR(ENOMEM);
        }
    } else if (avctx->stats_out)
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        /* bitstream is stored as little-endian 32-bit words */
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size = size * 4;
    *got_packet = 1;

    return 0;
}
976 
/* Encoder close (signature elided in this rendering; registered below as
 * encode_end): frees the stats buffer and the per-row temp buffers. */
{
    HYuvEncContext *s = avctx->priv_data;

    av_freep(&avctx->stats_out);

    for (int i = 0; i < 3; i++)
        av_freep(&s->temp[i]);

    return 0;
}
988 
989 #define OFFSET(x) offsetof(HYuvEncContext, x)
990 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
991 
/* Option table shared by both encoders; normal_class below skips the
 * first entry so "context" stays ffvhuff-only. */
static const AVOption options[] = {
    /* ffvhuff-only options */
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    /* Common options */
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism",
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 },
      0, 1, VE },
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, .unit = "pred" },
    { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { NULL },
};
1005 
/* AVClass for the plain huffyuv encoder; "options + 1" skips the
 * ffvhuff-only "context" entry. */
static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name  = av_default_item_name,
    .option     = options + 1,
    .version    = LIBAVUTIL_VERSION_INT,
};
1012 
/* Codec registration for huffyuv.  NOTE(review): the opening
 * "const FFCodec ff_huffyuv_encoder = {" line, part of the capability
 * flags, the encode callback and the pix_fmt list appear elided in this
 * rendering — confirm against the full source. */
    .p.name           = "huffyuv",
    CODEC_LONG_NAME("Huffyuv / HuffYUV"),
    .p.type           = AVMEDIA_TYPE_VIDEO,
    .p.id             = AV_CODEC_ID_HUFFYUV,
    .p.capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
    .priv_data_size   = sizeof(HYuvEncContext),
    .init             = encode_init,
    .close            = encode_end,
    .p.priv_class     = &normal_class,
    .p.pix_fmts       = (const enum AVPixelFormat[]){
    },
    .caps_internal    = FF_CODEC_CAP_INIT_CLEANUP,
};
1031 
1032 #if CONFIG_FFVHUFF_ENCODER
/* AVClass for the ffvhuff encoder; uses the full option table,
 * including the "context" option. */
static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
1039 
/* Codec registration for the FFmpeg huffyuv variant.  NOTE(review): part
 * of the capability flags, the encode callback and the pix_fmt list
 * appear elided in this rendering — confirm against the full source. */
const FFCodec ff_ffvhuff_encoder = {
    .p.name           = "ffvhuff",
    CODEC_LONG_NAME("Huffyuv FFmpeg variant"),
    .p.type           = AVMEDIA_TYPE_VIDEO,
    .p.id             = AV_CODEC_ID_FFVHUFF,
    .p.capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
    .priv_data_size   = sizeof(HYuvEncContext),
    .init             = encode_init,
    .close            = encode_end,
    .p.priv_class     = &ff_class,
    .p.pix_fmts       = (const enum AVPixelFormat[]){
    },
    .caps_internal    = FF_CODEC_CAP_INIT_CLEANUP,
};
1071 #endif
HYuvEncContext::chroma_h_shift
int chroma_h_shift
Definition: huffyuvenc.c:62
STATEND_16
#define STATEND_16
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:522
A
#define A(x)
Definition: vpx_arith.h:28
bswapdsp.h
HYuvEncContext::flags
int flags
Definition: huffyuvenc.c:64
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
store_huffman_tables
static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
Definition: huffyuvenc.c:222
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
WRITE2_16
#define WRITE2_16
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:77
encode_422_bitstream
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
Definition: huffyuvenc.c:445
HYuvEncContext::stats
uint64_t stats[4][MAX_VLC_N]
Definition: huffyuvenc.c:72
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
sub_left_prediction_bgr32
static void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:122
src1
const pixel * src1
Definition: h264pred_template.c:421
HYuvEncContext::chroma_v_shift
int chroma_v_shift
Definition: huffyuvenc.c:63
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:50
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
OFFSET
#define OFFSET(x)
Definition: huffyuvenc.c:989
encode_gray_bitstream
static int encode_gray_bitstream(HYuvEncContext *s, int count)
Definition: huffyuvenc.c:602
encode_bgra_bitstream
static int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
Definition: huffyuvenc.c:647
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:244
store_table
static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:197
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:514
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:521
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:524
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:516
AVOption
AVOption.
Definition: opt.h:357
HYuvEncContext::interlaced
int interlaced
Definition: huffyuvenc.c:52
encode.h
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:148
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
FFCodec
Definition: codec_internal.h:126
STATS_OUT_SIZE
#define STATS_OUT_SIZE
MEDIAN
@ MEDIAN
Definition: huffyuv.h:55
encode_plane_bitstream
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
Definition: huffyuvenc.c:500
WRITEEND_16
#define WRITEEND_16
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
HYuvEncContext::temp16
uint16_t * temp16[3]
Definition: huffyuvenc.c:70
HYuvEncContext::yuv
int yuv
Definition: huffyuvenc.c:61
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:517
HYuvEncContext::bitstream_bpp
int bitstream_bpp
Definition: huffyuvenc.c:54
HYuvEncContext::decorrelate
int decorrelate
Definition: huffyuvenc.c:53
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
LOADEND_14
#define LOADEND_14
STAT2_16
#define STAT2_16
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:513
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
v0
#define v0
Definition: regdef.h:26
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
sub_left_prediction_rgb24
static void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:158
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:523
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:476
HYuvEncContext::vlc_n
int vlc_n
Definition: huffyuvenc.c:58
HYuvEncContext::non_determ
int non_determ
Definition: huffyuvenc.c:78
LOAD2
#define LOAD2
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:462
AV_CODEC_FLAG2_NO_OUTPUT
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:361
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:295
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:490
mask
static const uint16_t mask[17]
Definition: lzw.c:38
emms_c
#define emms_c()
Definition: emms.h:63
STATEND
#define STATEND
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
planes
static const struct @417 planes[]
WRITE2
#define WRITE2
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1342
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:491
g
const char * g
Definition: vf_curves.c:128
HYuvEncContext::pb
PutBitContext pb
Definition: huffyuvenc.c:50
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:159
STAT2
#define STAT2
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
normal_class
static const AVClass normal_class
Definition: huffyuvenc.c:1006
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:475
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:489
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
HuffYUVEncDSPContext
Definition: huffyuvencdsp.h:26
huffyuvencdsp.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:119
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
WRITE_GBRA
#define WRITE_GBRA
MAX_N
#define MAX_N
Definition: huffyuv.h:49
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:271
if
if(ret)
Definition: filter_design.txt:179
LOADEND_16
#define LOADEND_16
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:497
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
LOADEND
#define LOADEND
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
WRITEEND
#define WRITEEND
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:60
ff_huffyuv_encoder
const FFCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1013
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:40
HYuvEncContext::context
int context
Definition: huffyuvenc.c:65
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
HYuvEncContext::len
uint8_t len[4][MAX_VLC_N]
Definition: huffyuvenc.c:73
index
int index
Definition: gxfenc.c:90
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:977
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:100
LOAD2_16
#define LOAD2_16
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *p, int *got_packet)
Definition: huffyuvenc.c:697
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1334
HYuvEncContext::temp
uint8_t * temp[3]
Definition: huffyuvenc.c:69
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:525
codec_internal.h
LOAD4
#define LOAD4
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
HYuvEncContext
Definition: huffyuvenc.c:47
HYuvEncContext::n
int n
Definition: huffyuvenc.c:57
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:483
size
int size
Definition: twinvq_data.h:10344
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:485
LEFT
#define LEFT
Definition: cdgraphics.c:171
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
height
#define height
Predictor
Definition: ratecontrol.h:33
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:518
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:169
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
VE
#define VE
Definition: huffyuvenc.c:990
HYuvEncContext::chroma
int chroma
Definition: huffyuvenc.c:60
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
emms.h
lossless_videoencdsp.h
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1568
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
HYuvEncContext::alpha
int alpha
Definition: huffyuvenc.c:59
sub_left_prediction
static int sub_left_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:91
src2
const pixel * src2
Definition: h264pred_template.c:422
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
HYuvEncContext::bps
int bps
Definition: huffyuvenc.c:56
HYuvEncContext::version
int version
Definition: huffyuvenc.c:55
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HYuvEncContext::bdsp
BswapDSPContext bdsp
Definition: huffyuvenc.c:75
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:54
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
LLVidEncDSPContext
Definition: lossless_videoencdsp.h:25
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:477
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
sub_median_prediction
static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:186
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
HYuvEncContext::hencdsp
HuffYUVEncDSPContext hencdsp
Definition: huffyuvenc.c:76
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:515
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:487
HYuvEncContext::picture_number
int picture_number
Definition: huffyuvenc.c:66
AVCodecContext
main external API structure.
Definition: avcodec.h:445
LOAD_GBRA
#define LOAD_GBRA
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
huffman.h
temp
else temp
Definition: vf_mcdeint.c:263
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
HYuvEncContext::predictor
Predictor predictor
Definition: huffyuvenc.c:51
src0
const pixel *const src0
Definition: h264pred_template.c:420
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
LOAD2_14
#define LOAD2_14
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
ff_ffvhuff_encoder
const FFCodec ff_ffvhuff_encoder
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:501
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:261
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
ff_huffyuvencdsp_init
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvencdsp.c:87
d
d
Definition: ffmpeg_filter.c:424
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:419
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
HYuvEncContext::llvidencdsp
LLVidEncDSPContext llvidencdsp
Definition: huffyuvenc.c:77
BswapDSPContext
Definition: bswapdsp.h:24
h
h
Definition: vp9dsp_template.c:2038
options
static const AVOption options[]
Definition: huffyuvenc.c:992
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:488
huffyuv.h
diff_bytes
static void diff_bytes(HYuvEncContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:81
ENCODE_PLANE
#define ENCODE_PLANE(LOAD, LOADEND, WRITE, WRITEEND, STAT, STATEND)
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
HYuvEncContext::avctx
AVCodecContext * avctx
Definition: huffyuvenc.c:49
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:486
STAT_BGRA
#define STAT_BGRA
HYuvEncContext::bits
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuvenc.c:74
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310