FFmpeg
utvideoenc.c
Go to the documentation of this file.
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "codec_internal.h"
33 #include "encode.h"
34 #include "bswapdsp.h"
35 #include "bytestream.h"
36 #include "lossless_videoencdsp.h"
37 #include "put_bits.h"
38 #include "mathops.h"
39 #include "utvideo.h"
40 #include "huffman.h"
41 
42 typedef struct UtvideoContext {
43  const AVClass *class;
46 
47  uint32_t frame_info_size, flags;
48  int planes;
49  int slices;
50  int compression;
51  int frame_pred;
52 
53  ptrdiff_t slice_stride;
54  uint8_t *slice_bits, *slice_buffer[4];
55  int slice_bits_size;
57 
/* One entry of a per-plane huffman code table */
typedef struct HuffEntry {
    uint16_t sym;  /* symbol value (0-255) this entry encodes */
    uint8_t len;   /* code length in bits; 255 marks an entry calculate_codes() skips */
    uint32_t code; /* canonical huffman code, right-aligned into the low len bits */
} HuffEntry;
63 
64 /* Compare huffman tree nodes */
65 static int ut_huff_cmp_len(const void *a, const void *b)
66 {
67  const HuffEntry *aa = a, *bb = b;
68  return (aa->len - bb->len)*256 + aa->sym - bb->sym;
69 }
70 
71 /* Compare huffentry symbols */
72 static int huff_cmp_sym(const void *a, const void *b)
73 {
74  const HuffEntry *aa = a, *bb = b;
75  return aa->sym - bb->sym;
76 }
77 
79 {
80  UtvideoContext *c = avctx->priv_data;
81  int i;
82 
83  av_freep(&c->slice_bits);
84  for (i = 0; i < 4; i++)
85  av_freep(&c->slice_buffer[i]);
86 
87  return 0;
88 }
89 
91 {
92  UtvideoContext *c = avctx->priv_data;
93  int i, subsampled_height;
94  uint32_t original_format;
95 
96  c->frame_info_size = 4;
97  c->slice_stride = FFALIGN(avctx->width, 32);
98 
99  switch (avctx->pix_fmt) {
100  case AV_PIX_FMT_GBRP:
101  c->planes = 3;
102  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
103  original_format = UTVIDEO_RGB;
104  break;
105  case AV_PIX_FMT_GBRAP:
106  c->planes = 4;
107  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
108  original_format = UTVIDEO_RGBA;
109  avctx->bits_per_coded_sample = 32;
110  break;
111  case AV_PIX_FMT_YUV420P:
112  if (avctx->width & 1 || avctx->height & 1) {
113  av_log(avctx, AV_LOG_ERROR,
114  "4:2:0 video requires even width and height.\n");
115  return AVERROR_INVALIDDATA;
116  }
117  c->planes = 3;
118  if (avctx->colorspace == AVCOL_SPC_BT709)
119  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
120  else
121  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
122  original_format = UTVIDEO_420;
123  break;
124  case AV_PIX_FMT_YUV422P:
125  if (avctx->width & 1) {
126  av_log(avctx, AV_LOG_ERROR,
127  "4:2:2 video requires even width.\n");
128  return AVERROR_INVALIDDATA;
129  }
130  c->planes = 3;
131  if (avctx->colorspace == AVCOL_SPC_BT709)
132  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
133  else
134  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
135  original_format = UTVIDEO_422;
136  break;
137  case AV_PIX_FMT_YUV444P:
138  c->planes = 3;
139  if (avctx->colorspace == AVCOL_SPC_BT709)
140  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
141  else
142  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
143  original_format = UTVIDEO_444;
144  break;
145  default:
146  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
147  avctx->pix_fmt);
148  return AVERROR_INVALIDDATA;
149  }
150 
151  ff_bswapdsp_init(&c->bdsp);
152  ff_llvidencdsp_init(&c->llvidencdsp);
153 
154  if (c->frame_pred == PRED_GRADIENT) {
155  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
157  }
158 
159  /*
160  * Check the asked slice count for obviously invalid
161  * values (> 256 or negative).
162  */
163  if (avctx->slices > 256 || avctx->slices < 0) {
164  av_log(avctx, AV_LOG_ERROR,
165  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
166  avctx->slices);
167  return AVERROR(EINVAL);
168  }
169 
170  /* Check that the slice count is not larger than the subsampled height */
171  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
172  if (avctx->slices > subsampled_height) {
173  av_log(avctx, AV_LOG_ERROR,
174  "Slice count %d is larger than the subsampling-applied height %d.\n",
175  avctx->slices, subsampled_height);
176  return AVERROR(EINVAL);
177  }
178 
179  /* extradata size is 4 * 32 bits */
180  avctx->extradata_size = 16;
181 
182  avctx->extradata = av_mallocz(avctx->extradata_size +
184 
185  if (!avctx->extradata) {
186  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
187  return AVERROR(ENOMEM);
188  }
189 
190  for (i = 0; i < c->planes; i++) {
191  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
193  if (!c->slice_buffer[i]) {
194  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
195  return AVERROR(ENOMEM);
196  }
197  }
198 
199  /*
200  * Set the version of the encoder.
201  * Last byte is "implementation ID", which is
202  * obtained from the creator of the format.
203  * Libavcodec has been assigned with the ID 0xF0.
204  */
205  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
206 
207  /*
208  * Set the "original format"
209  * Not used for anything during decoding.
210  */
211  AV_WL32(avctx->extradata + 4, original_format);
212 
213  /* Write 4 as the 'frame info size' */
214  AV_WL32(avctx->extradata + 8, c->frame_info_size);
215 
216  /*
217  * Set how many slices are going to be used.
218  * By default uses multiple slices depending on the subsampled height.
219  * This enables multithreading in the official decoder.
220  */
221  if (!avctx->slices) {
222  c->slices = subsampled_height / 120;
223 
224  if (!c->slices)
225  c->slices = 1;
226  else if (c->slices > 256)
227  c->slices = 256;
228  } else {
229  c->slices = avctx->slices;
230  }
231 
232  /* Set compression mode */
233  c->compression = COMP_HUFF;
234 
235  /*
236  * Set the encoding flags:
237  * - Slice count minus 1
238  * - Interlaced encoding mode flag, set to zero for now.
239  * - Compression mode (none/huff)
240  * And write the flags.
241  */
242  c->flags = (c->slices - 1) << 24;
243  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
244  c->flags |= c->compression;
245 
246  AV_WL32(avctx->extradata + 12, c->flags);
247 
248  return 0;
249 }
250 
/*
 * Convert packed-planar G/B/R(/A) input into Ut Video's internal plane
 * layout: the G plane is copied as-is, while B and R are stored as
 * differences against G biased by 0x80 (all arithmetic wraps to 8 bits).
 * Output rows are written with dst_stride spacing, starting two rows
 * into each destination plane.
 */
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
                              uint8_t *const src[4], int planes, const int stride[4],
                              int width, int height)
{
    const uint8_t *row_g = src[0];
    const uint8_t *row_b = src[1];
    const uint8_t *row_r = src[2];
    const uint8_t *row_a = src[3];
    /* writing starts two destination rows in */
    int pos = 2 * dst_stride;
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++, pos++) {
            unsigned int lum = row_g[x];

            dst[0][pos] = lum;
            lum += 0x80; /* bias so mid-grey maps to zero difference */
            dst[1][pos] = row_b[x] - lum;
            dst[2][pos] = row_r[x] - lum;
            if (planes != 3)
                dst[3][pos] = row_a[x]; /* alpha is passed through untouched */
        }
        if (planes != 3)
            row_a += stride[3];
        /* advance to the next destination row */
        pos += dst_stride - width;
        row_g += stride[0];
        row_b += stride[1];
        row_r += stride[2];
    }
}
291 
292 #undef A
293 #undef B
294 
295 /* Write data to a plane with median prediction */
296 static void median_predict(UtvideoContext *c, const uint8_t *src, uint8_t *dst,
297  ptrdiff_t stride, int width, int height)
298 {
299  int i, j;
300  int A, B;
301  uint8_t prev;
302 
303  /* First line uses left neighbour prediction */
304  prev = 0x80; /* Set the initial value */
305  for (i = 0; i < width; i++) {
306  *dst++ = src[i] - prev;
307  prev = src[i];
308  }
309 
310  if (height == 1)
311  return;
312 
313  src += stride;
314 
315  /*
316  * Second line uses top prediction for the first sample,
317  * and median for the rest.
318  */
319  A = B = 0;
320 
321  /* Rest of the coded part uses median prediction */
322  for (j = 1; j < height; j++) {
323  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
324  dst += width;
325  src += stride;
326  }
327 }
328 
/*
 * Build a histogram of byte values over a width-contiguous plane:
 * counts[v] is incremented once for every occurrence of v in src.
 * Rows are stored back-to-back (stride == width), so the plane can be
 * scanned as one flat run of width * height bytes.
 */
static void count_usage(uint8_t *src, int width,
                        int height, uint64_t *counts)
{
    const size_t total = (size_t)width * height;
    size_t n;

    for (n = 0; n < total; n++)
        counts[src[n]]++;
}
342 
/*
 * Calculate the actual huffman codes from the code lengths.
 * On entry he[] holds 256 entries with sym and len filled in; on return
 * each used entry additionally has its canonical code assigned, and the
 * table is left sorted by symbol so it can be indexed directly.
 */
static void calculate_codes(HuffEntry *he)
{
    int last, i;
    uint32_t code;

    /* Order by ascending code length (ties by symbol) for canonical assignment */
    qsort(he, 256, sizeof(*he), ut_huff_cmp_len);

    /* Skip trailing entries with length 255, which mark unused symbols */
    last = 255;
    while (he[last].len == 255 && last)
        last--;

    /*
     * Walk from the longest code towards the shortest, accumulating the
     * code value in the high bits of a 32-bit word; each entry takes the
     * top len bits of the running value as its right-aligned code.
     */
    code = 0;
    for (i = last; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code += 0x80000000u >> (he[i].len - 1);
    }

    /* Restore symbol order so callers can look codes up by symbol value */
    qsort(he, 256, sizeof(*he), huff_cmp_sym);
}
363 
364 /* Write huffman bit codes to a memory block */
365 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
366  int width, int height, HuffEntry *he)
367 {
368  PutBitContext pb;
369  int i, j;
370  int count;
371 
372  init_put_bits(&pb, dst, dst_size);
373 
374  /* Write the codes */
375  for (j = 0; j < height; j++) {
376  for (i = 0; i < width; i++)
377  put_bits(&pb, he[src[i]].len, he[src[i]].code);
378 
379  src += width;
380  }
381 
382  /* Pad output to a 32-bit boundary */
383  count = put_bits_count(&pb) & 0x1F;
384 
385  if (count)
386  put_bits(&pb, 32 - count, 0);
387 
388  /* Flush the rest with zeroes */
389  flush_put_bits(&pb);
390 
391  /* Return the amount of bytes written */
392  return put_bytes_output(&pb);
393 }
394 
395 static int encode_plane(AVCodecContext *avctx, const uint8_t *src,
396  uint8_t *dst, ptrdiff_t stride, int plane_no,
397  int width, int height, PutByteContext *pb)
398 {
399  UtvideoContext *c = avctx->priv_data;
400  uint8_t lengths[256];
401  uint64_t counts[256] = { 0 };
402 
403  HuffEntry he[256];
404 
405  uint32_t offset = 0, slice_len = 0;
406  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
407  int i, sstart, send = 0;
408  int symbol;
409  int ret;
410 
411  /* Do prediction / make planes */
412  switch (c->frame_pred) {
413  case PRED_NONE:
414  for (i = 0; i < c->slices; i++) {
415  sstart = send;
416  send = height * (i + 1) / c->slices & cmask;
417  av_image_copy_plane(dst + sstart * width, width,
418  src + sstart * stride, stride,
419  width, send - sstart);
420  }
421  break;
422  case PRED_LEFT:
423  for (i = 0; i < c->slices; i++) {
424  sstart = send;
425  send = height * (i + 1) / c->slices & cmask;
426  c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
427  }
428  break;
429  case PRED_MEDIAN:
430  for (i = 0; i < c->slices; i++) {
431  sstart = send;
432  send = height * (i + 1) / c->slices & cmask;
433  median_predict(c, src + sstart * stride, dst + sstart * width,
434  stride, width, send - sstart);
435  }
436  break;
437  default:
438  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
439  c->frame_pred);
441  }
442 
443  /* Count the usage of values */
444  count_usage(dst, width, height, counts);
445 
446  /* Check for a special case where only one symbol was used */
447  for (symbol = 0; symbol < 256; symbol++) {
448  /* If non-zero count is found, see if it matches width * height */
449  if (counts[symbol]) {
450  /* Special case if only one symbol was used */
451  if (counts[symbol] == width * (int64_t)height) {
452  /*
453  * Write a zero for the single symbol
454  * used in the plane, else 0xFF.
455  */
456  for (i = 0; i < 256; i++) {
457  if (i == symbol)
458  bytestream2_put_byte(pb, 0);
459  else
460  bytestream2_put_byte(pb, 0xFF);
461  }
462 
463  /* Write zeroes for lengths */
464  for (i = 0; i < c->slices; i++)
465  bytestream2_put_le32(pb, 0);
466 
467  /* And that's all for that plane folks */
468  return 0;
469  }
470  break;
471  }
472  }
473 
474  /* Calculate huffman lengths */
475  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
476  return ret;
477 
478  /*
479  * Write the plane's header into the output packet:
480  * - huffman code lengths (256 bytes)
481  * - slice end offsets (gotten from the slice lengths)
482  */
483  for (i = 0; i < 256; i++) {
484  bytestream2_put_byte(pb, lengths[i]);
485 
486  he[i].len = lengths[i];
487  he[i].sym = i;
488  }
489 
490  /* Calculate the huffman codes themselves */
491  calculate_codes(he);
492 
493  send = 0;
494  for (i = 0; i < c->slices; i++) {
495  sstart = send;
496  send = height * (i + 1) / c->slices & cmask;
497 
498  /*
499  * Write the huffman codes to a buffer,
500  * get the offset in bytes.
501  */
502  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
503  width * height + 4, width,
504  send - sstart, he);
505 
506  slice_len = offset - slice_len;
507 
508  /* Byteswap the written huffman codes */
509  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
510  (uint32_t *) c->slice_bits,
511  slice_len >> 2);
512 
513  /* Write the offset to the stream */
514  bytestream2_put_le32(pb, offset);
515 
516  /* Seek to the data part of the packet */
517  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
518  offset - slice_len, SEEK_CUR);
519 
520  /* Write the slices' data into the output packet */
521  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
522 
523  /* Seek back to the slice offsets */
524  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
525  SEEK_CUR);
526 
527  slice_len = offset;
528  }
529 
530  /* And at the end seek to the end of written slice(s) */
531  bytestream2_seek_p(pb, offset, SEEK_CUR);
532 
533  return 0;
534 }
535 
537  const AVFrame *pic, int *got_packet)
538 {
539  UtvideoContext *c = avctx->priv_data;
540  PutByteContext pb;
541 
542  uint32_t frame_info;
543 
544  uint8_t *dst;
545 
546  int width = avctx->width, height = avctx->height;
547  int i, ret = 0;
548 
549  /* Allocate a new packet if needed, and set it to the pointer dst */
550  ret = ff_alloc_packet(avctx, pkt, (256 + 4 * c->slices + width * height)
551  * c->planes + 4);
552 
553  if (ret < 0)
554  return ret;
555 
556  dst = pkt->data;
557 
558  bytestream2_init_writer(&pb, dst, pkt->size);
559 
560  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
561 
562  if (!c->slice_bits) {
563  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
564  return AVERROR(ENOMEM);
565  }
566 
567  /* In case of RGB, mangle the planes to Ut Video's format */
568  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
569  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
570  c->planes, pic->linesize, width, height);
571 
572  /* Deal with the planes */
573  switch (avctx->pix_fmt) {
574  case AV_PIX_FMT_GBRP:
575  case AV_PIX_FMT_GBRAP:
576  for (i = 0; i < c->planes; i++) {
577  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
578  c->slice_buffer[i], c->slice_stride, i,
579  width, height, &pb);
580 
581  if (ret) {
582  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
583  return ret;
584  }
585  }
586  break;
587  case AV_PIX_FMT_YUV444P:
588  for (i = 0; i < c->planes; i++) {
589  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
590  pic->linesize[i], i, width, height, &pb);
591 
592  if (ret) {
593  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
594  return ret;
595  }
596  }
597  break;
598  case AV_PIX_FMT_YUV422P:
599  for (i = 0; i < c->planes; i++) {
600  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
601  pic->linesize[i], i, width >> !!i, height, &pb);
602 
603  if (ret) {
604  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
605  return ret;
606  }
607  }
608  break;
609  case AV_PIX_FMT_YUV420P:
610  for (i = 0; i < c->planes; i++) {
611  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
612  pic->linesize[i], i, width >> !!i, height >> !!i,
613  &pb);
614 
615  if (ret) {
616  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
617  return ret;
618  }
619  }
620  break;
621  default:
622  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
623  avctx->pix_fmt);
624  return AVERROR_INVALIDDATA;
625  }
626 
627  /*
628  * Write frame information (LE 32-bit unsigned)
629  * into the output packet.
630  * Contains the prediction method.
631  */
632  frame_info = c->frame_pred << 8;
633  bytestream2_put_le32(&pb, frame_info);
634 
635  pkt->size = bytestream2_tell_p(&pb);
636 
637  /* Packet should be done */
638  *got_packet = 1;
639 
640  return 0;
641 }
642 
643 #define OFFSET(x) offsetof(UtvideoContext, x)
644 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    /*
     * Prediction method. Note that "gradient" is accepted by the option
     * parser but rejected by the encoder's init function, which reports
     * gradient prediction as unsupported.
     */
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, .unit = "pred" },
    { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, .unit = "pred" },

    { NULL},
};
654 
/* AVClass descriptor exposing the encoder's private options table */
static const AVClass utvideo_class = {
    .class_name = "utvideo",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
661 
663  .p.name = "utvideo",
664  CODEC_LONG_NAME("Ut Video"),
665  .p.type = AVMEDIA_TYPE_VIDEO,
666  .p.id = AV_CODEC_ID_UTVIDEO,
667  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
669  .priv_data_size = sizeof(UtvideoContext),
670  .p.priv_class = &utvideo_class,
671  .init = utvideo_encode_init,
673  .close = utvideo_encode_close,
674  .p.pix_fmts = (const enum AVPixelFormat[]) {
677  },
678  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
679 };
A
#define A(x)
Definition: vpx_arith.h:28
utvideo.h
bswapdsp.h
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:424
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:34
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:89
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
UTVIDEO_420
@ UTVIDEO_420
Definition: utvideo.h:54
HuffEntry::len
uint8_t len
Definition: exr.c:95
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
UTVIDEO_RGB
@ UTVIDEO_RGB
Definition: utvideo.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:222
AVPacket::data
uint8_t * data
Definition: packet.h:522
count_usage
static void count_usage(uint8_t *src, int width, int height, uint64_t *counts)
Definition: utvideoenc.c:330
AVOption
AVOption.
Definition: opt.h:346
encode.h
b
#define b
Definition: input.c:41
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:197
FFCodec
Definition: codec_internal.h:127
UtvideoContext::slices
int slices
Definition: utvideodec.c:53
utvideo_encode_frame
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
Definition: utvideoenc.c:536
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:374
write_huff_codes
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he)
Definition: utvideoenc.c:365
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:296
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
width
#define width
intreadwrite.h
huff_cmp_sym
static int huff_cmp_sym(const void *a, const void *b)
Definition: utvideoenc.c:72
COMP_HUFF
@ COMP_HUFF
Definition: utvideo.h:41
g
const char * g
Definition: vf_curves.c:127
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
HuffEntry::sym
uint16_t sym
Definition: exr.c:96
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:159
B
#define B
Definition: huffyuv.h:42
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:205
mangle_rgb_planes
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride, uint8_t *const src[4], int planes, const int stride[4], int width, int height)
Definition: utvideoenc.c:251
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
ff_utvideo_encoder
const FFCodec ff_utvideo_encoder
Definition: utvideoenc.c:662
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
UTVIDEO_RGBA
@ UTVIDEO_RGBA
Definition: utvideo.h:53
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
UtvideoContext::slice_bits
uint8_t * slice_bits
Definition: utvideodec.c:60
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:60
UtvideoContext::frame_pred
int frame_pred
Definition: utvideodec.c:56
mathops.h
UTVIDEO_444
@ UTVIDEO_444
Definition: utvideo.h:56
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
utvideo_encode_init
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
Definition: utvideoenc.c:90
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:102
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:417
PutByteContext
Definition: bytestream.h:37
UtvideoContext::slice_stride
ptrdiff_t slice_stride
Definition: utvideoenc.c:53
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:523
codec_internal.h
VE
#define VE
Definition: utvideoenc.c:644
PRED_NONE
@ PRED_NONE
Definition: utvideo.h:33
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:36
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
utvideo_encode_close
static av_cold int utvideo_encode_close(AVCodecContext *avctx)
Definition: utvideoenc.c:78
ut_huff_cmp_len
static int ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideoenc.c:65
lossless_videoencdsp.h
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1567
UTVIDEO_422
@ UTVIDEO_422
Definition: utvideo.h:55
options
static const AVOption options[]
Definition: utvideoenc.c:645
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
UtvideoContext
Definition: utvideodec.c:45
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
LLVidEncDSPContext
Definition: lossless_videoencdsp.h:25
avcodec.h
median_predict
static void median_predict(UtvideoContext *c, const uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: utvideoenc.c:296
stride
#define stride
Definition: h264pred_template.c:537
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
planes
static const struct @383 planes[]
HuffEntry::code
uint32_t code
Definition: exr.c:97
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
UtvideoContext::llvidencdsp
LLVidEncDSPContext llvidencdsp
Definition: utvideoenc.c:45
UtvideoContext::frame_info_size
uint32_t frame_info_size
Definition: utvideodec.c:51
UtvideoContext::slice_buffer
uint8_t * slice_buffer[4]
Definition: utvideoenc.c:54
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
huffman.h
HuffEntry
Definition: exr.c:94
UtvideoContext::compression
int compression
Definition: utvideodec.c:54
UtvideoContext::flags
uint32_t flags
Definition: utvideodec.c:51
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
encode_plane
static int encode_plane(AVCodecContext *avctx, const uint8_t *src, uint8_t *dst, ptrdiff_t stride, int plane_no, int width, int height, PutByteContext *pb)
Definition: utvideoenc.c:395
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
utvideo_class
static const AVClass utvideo_class
Definition: utvideoenc.c:655
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1047
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
UtvideoContext::slice_bits_size
int slice_bits_size
Definition: utvideodec.c:61
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
UtvideoContext::bdsp
BswapDSPContext bdsp
Definition: utvideodec.c:48
bytestream.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
BswapDSPContext
Definition: bswapdsp.h:24
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
UtvideoContext::planes
int planes
Definition: utvideodec.c:52
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:35
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
calculate_codes
static void calculate_codes(HuffEntry *he)
Definition: utvideoenc.c:344
OFFSET
#define OFFSET(x)
Definition: utvideoenc.c:643