FFmpeg
utvideoenc.c
Go to the documentation of this file.
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "codec_internal.h"
33 #include "encode.h"
34 #include "bswapdsp.h"
35 #include "bytestream.h"
36 #include "put_bits.h"
37 #include "mathops.h"
38 #include "utvideo.h"
39 #include "huffman.h"
40 
/* One Huffman table entry, used both while building the table and while
 * writing codes to the bitstream. */
typedef struct HuffEntry {
    uint16_t sym;  /* symbol value (tables here hold 256 entries, so 0-255) */
    uint8_t len;   /* code length in bits; 255 appears to mark an unused
                    * symbol — see the trimming loop in calculate_codes() */
    uint32_t code; /* the Huffman code bits, right-aligned in the word */
} HuffEntry;
46 
47 /* Compare huffman tree nodes */
48 static int ut_huff_cmp_len(const void *a, const void *b)
49 {
50  const HuffEntry *aa = a, *bb = b;
51  return (aa->len - bb->len)*256 + aa->sym - bb->sym;
52 }
53 
54 /* Compare huffentry symbols */
55 static int huff_cmp_sym(const void *a, const void *b)
56 {
57  const HuffEntry *aa = a, *bb = b;
58  return aa->sym - bb->sym;
59 }
60 
62 {
63  UtvideoContext *c = avctx->priv_data;
64  int i;
65 
66  av_freep(&c->slice_bits);
67  for (i = 0; i < 4; i++)
68  av_freep(&c->slice_buffer[i]);
69 
70  return 0;
71 }
72 
74 {
75  UtvideoContext *c = avctx->priv_data;
76  int i, subsampled_height;
77  uint32_t original_format;
78 
79  c->avctx = avctx;
80  c->frame_info_size = 4;
81  c->slice_stride = FFALIGN(avctx->width, 32);
82 
83  switch (avctx->pix_fmt) {
84  case AV_PIX_FMT_GBRP:
85  c->planes = 3;
86  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
87  original_format = UTVIDEO_RGB;
88  break;
89  case AV_PIX_FMT_GBRAP:
90  c->planes = 4;
91  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
92  original_format = UTVIDEO_RGBA;
93  avctx->bits_per_coded_sample = 32;
94  break;
95  case AV_PIX_FMT_YUV420P:
96  if (avctx->width & 1 || avctx->height & 1) {
97  av_log(avctx, AV_LOG_ERROR,
98  "4:2:0 video requires even width and height.\n");
99  return AVERROR_INVALIDDATA;
100  }
101  c->planes = 3;
102  if (avctx->colorspace == AVCOL_SPC_BT709)
103  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
104  else
105  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
106  original_format = UTVIDEO_420;
107  break;
108  case AV_PIX_FMT_YUV422P:
109  if (avctx->width & 1) {
110  av_log(avctx, AV_LOG_ERROR,
111  "4:2:2 video requires even width.\n");
112  return AVERROR_INVALIDDATA;
113  }
114  c->planes = 3;
115  if (avctx->colorspace == AVCOL_SPC_BT709)
116  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
117  else
118  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
119  original_format = UTVIDEO_422;
120  break;
121  case AV_PIX_FMT_YUV444P:
122  c->planes = 3;
123  if (avctx->colorspace == AVCOL_SPC_BT709)
124  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
125  else
126  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
127  original_format = UTVIDEO_444;
128  break;
129  default:
130  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
131  avctx->pix_fmt);
132  return AVERROR_INVALIDDATA;
133  }
134 
135  ff_bswapdsp_init(&c->bdsp);
136  ff_llvidencdsp_init(&c->llvidencdsp);
137 
138  if (c->frame_pred == PRED_GRADIENT) {
139  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
141  }
142 
143  /*
144  * Check the asked slice count for obviously invalid
145  * values (> 256 or negative).
146  */
147  if (avctx->slices > 256 || avctx->slices < 0) {
148  av_log(avctx, AV_LOG_ERROR,
149  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
150  avctx->slices);
151  return AVERROR(EINVAL);
152  }
153 
154  /* Check that the slice count is not larger than the subsampled height */
155  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
156  if (avctx->slices > subsampled_height) {
157  av_log(avctx, AV_LOG_ERROR,
158  "Slice count %d is larger than the subsampling-applied height %d.\n",
159  avctx->slices, subsampled_height);
160  return AVERROR(EINVAL);
161  }
162 
163  /* extradata size is 4 * 32 bits */
164  avctx->extradata_size = 16;
165 
166  avctx->extradata = av_mallocz(avctx->extradata_size +
168 
169  if (!avctx->extradata) {
170  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
171  return AVERROR(ENOMEM);
172  }
173 
174  for (i = 0; i < c->planes; i++) {
175  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
177  if (!c->slice_buffer[i]) {
178  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
179  return AVERROR(ENOMEM);
180  }
181  }
182 
183  /*
184  * Set the version of the encoder.
185  * Last byte is "implementation ID", which is
186  * obtained from the creator of the format.
187  * Libavcodec has been assigned with the ID 0xF0.
188  */
189  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
190 
191  /*
192  * Set the "original format"
193  * Not used for anything during decoding.
194  */
195  AV_WL32(avctx->extradata + 4, original_format);
196 
197  /* Write 4 as the 'frame info size' */
198  AV_WL32(avctx->extradata + 8, c->frame_info_size);
199 
200  /*
201  * Set how many slices are going to be used.
202  * By default uses multiple slices depending on the subsampled height.
203  * This enables multithreading in the official decoder.
204  */
205  if (!avctx->slices) {
206  c->slices = subsampled_height / 120;
207 
208  if (!c->slices)
209  c->slices = 1;
210  else if (c->slices > 256)
211  c->slices = 256;
212  } else {
213  c->slices = avctx->slices;
214  }
215 
216  /* Set compression mode */
217  c->compression = COMP_HUFF;
218 
219  /*
220  * Set the encoding flags:
221  * - Slice count minus 1
222  * - Interlaced encoding mode flag, set to zero for now.
223  * - Compression mode (none/huff)
224  * And write the flags.
225  */
226  c->flags = (c->slices - 1) << 24;
227  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
228  c->flags |= c->compression;
229 
230  AV_WL32(avctx->extradata + 12, c->flags);
231 
232  return 0;
233 }
234 
/*
 * Convert packed-planar GBR(A) input into Ut Video's plane layout:
 * the G plane is copied as-is, while B and R are stored as differences
 * from G (with a 0x80 bias), and A (when planes == 4) is copied as-is.
 * Output is written starting two dst_stride lines into each dst plane.
 * All per-sample arithmetic wraps modulo 256 via the uint8_t stores.
 */
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
                              uint8_t *const src[4], int planes, const int stride[4],
                              int width, int height)
{
    const uint8_t *gp = src[0];
    const uint8_t *bp = src[1];
    const uint8_t *rp = src[2];
    const uint8_t *ap = src[3];
    ptrdiff_t off = 2 * dst_stride; /* skip two lines of slack at the top */
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            unsigned int g = gp[x];

            dst[0][off] = g;
            g += 0x80; /* bias so that B-G / R-G center around zero */
            dst[1][off] = bp[x] - g;
            dst[2][off] = rp[x] - g;
            if (planes == 4)
                dst[3][off] = ap[x];
            off++;
        }
        if (planes == 4)
            ap += stride[3];
        off += dst_stride - width; /* advance to the next output line */
        gp += stride[0];
        bp += stride[1];
        rp += stride[2];
    }
}
275 
276 #undef A
277 #undef B
278 
279 /* Write data to a plane with median prediction */
280 static void median_predict(UtvideoContext *c, const uint8_t *src, uint8_t *dst,
281  ptrdiff_t stride, int width, int height)
282 {
283  int i, j;
284  int A, B;
285  uint8_t prev;
286 
287  /* First line uses left neighbour prediction */
288  prev = 0x80; /* Set the initial value */
289  for (i = 0; i < width; i++) {
290  *dst++ = src[i] - prev;
291  prev = src[i];
292  }
293 
294  if (height == 1)
295  return;
296 
297  src += stride;
298 
299  /*
300  * Second line uses top prediction for the first sample,
301  * and median for the rest.
302  */
303  A = B = 0;
304 
305  /* Rest of the coded part uses median prediction */
306  for (j = 1; j < height; j++) {
307  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
308  dst += width;
309  src += stride;
310  }
311 }
312 
/* Count the usage of values in a plane.
 * Increments counts[v] for every byte v in the width*height plane;
 * rows are packed tightly at 'width' bytes. 'counts' must hold 256
 * entries and is accumulated into (not cleared here).
 * Improvement: src is now const-qualified — the plane is only read,
 * and callers passing non-const pointers are unaffected. */
static void count_usage(const uint8_t *src, int width,
                        int height, uint64_t *counts)
{
    int i, j;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            counts[src[i]]++;
        }
        src += width;
    }
}
326 
/* Calculate the actual huffman codes from the code lengths.
 * he[] holds 256 entries whose .len fields are already filled in;
 * on return every used entry has its canonical code in .code and
 * the array is back in symbol order. */
static void calculate_codes(HuffEntry *he)
{
    int last, i;
    uint32_t code;

    /* Sort by code length (ascending), ties broken by symbol value */
    qsort(he, 256, sizeof(*he), ut_huff_cmp_len);

    /* Skip trailing entries with len == 255 — apparently the marker for
     * unused symbols (the sort above puts them last). The '&& last' guard
     * stops at index 0 even if every entry has len 255. */
    last = 255;
    while (he[last].len == 255 && last)
        last--;

    /* Assign canonical codes starting from the longest code (end of the
     * sorted array) towards the shortest: 'code' accumulates left-aligned
     * in a 32-bit word, and each entry takes its top 'len' bits. */
    code = 0;
    for (i = last; i >= 0; i--) {
        he[i].code  = code >> (32 - he[i].len);
        code       += 0x80000000u >> (he[i].len - 1);
    }

    /* Restore symbol order so the table can be indexed by symbol value */
    qsort(he, 256, sizeof(*he), huff_cmp_sym);
}
347 
348 /* Write huffman bit codes to a memory block */
349 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
350  int width, int height, HuffEntry *he)
351 {
352  PutBitContext pb;
353  int i, j;
354  int count;
355 
356  init_put_bits(&pb, dst, dst_size);
357 
358  /* Write the codes */
359  for (j = 0; j < height; j++) {
360  for (i = 0; i < width; i++)
361  put_bits(&pb, he[src[i]].len, he[src[i]].code);
362 
363  src += width;
364  }
365 
366  /* Pad output to a 32-bit boundary */
367  count = put_bits_count(&pb) & 0x1F;
368 
369  if (count)
370  put_bits(&pb, 32 - count, 0);
371 
372  /* Flush the rest with zeroes */
373  flush_put_bits(&pb);
374 
375  /* Return the amount of bytes written */
376  return put_bytes_output(&pb);
377 }
378 
379 static int encode_plane(AVCodecContext *avctx, const uint8_t *src,
380  uint8_t *dst, ptrdiff_t stride, int plane_no,
381  int width, int height, PutByteContext *pb)
382 {
383  UtvideoContext *c = avctx->priv_data;
384  uint8_t lengths[256];
385  uint64_t counts[256] = { 0 };
386 
387  HuffEntry he[256];
388 
389  uint32_t offset = 0, slice_len = 0;
390  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
391  int i, sstart, send = 0;
392  int symbol;
393  int ret;
394 
395  /* Do prediction / make planes */
396  switch (c->frame_pred) {
397  case PRED_NONE:
398  for (i = 0; i < c->slices; i++) {
399  sstart = send;
400  send = height * (i + 1) / c->slices & cmask;
401  av_image_copy_plane(dst + sstart * width, width,
402  src + sstart * stride, stride,
403  width, send - sstart);
404  }
405  break;
406  case PRED_LEFT:
407  for (i = 0; i < c->slices; i++) {
408  sstart = send;
409  send = height * (i + 1) / c->slices & cmask;
410  c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
411  }
412  break;
413  case PRED_MEDIAN:
414  for (i = 0; i < c->slices; i++) {
415  sstart = send;
416  send = height * (i + 1) / c->slices & cmask;
417  median_predict(c, src + sstart * stride, dst + sstart * width,
418  stride, width, send - sstart);
419  }
420  break;
421  default:
422  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
423  c->frame_pred);
425  }
426 
427  /* Count the usage of values */
428  count_usage(dst, width, height, counts);
429 
430  /* Check for a special case where only one symbol was used */
431  for (symbol = 0; symbol < 256; symbol++) {
432  /* If non-zero count is found, see if it matches width * height */
433  if (counts[symbol]) {
434  /* Special case if only one symbol was used */
435  if (counts[symbol] == width * (int64_t)height) {
436  /*
437  * Write a zero for the single symbol
438  * used in the plane, else 0xFF.
439  */
440  for (i = 0; i < 256; i++) {
441  if (i == symbol)
442  bytestream2_put_byte(pb, 0);
443  else
444  bytestream2_put_byte(pb, 0xFF);
445  }
446 
447  /* Write zeroes for lengths */
448  for (i = 0; i < c->slices; i++)
449  bytestream2_put_le32(pb, 0);
450 
451  /* And that's all for that plane folks */
452  return 0;
453  }
454  break;
455  }
456  }
457 
458  /* Calculate huffman lengths */
459  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
460  return ret;
461 
462  /*
463  * Write the plane's header into the output packet:
464  * - huffman code lengths (256 bytes)
465  * - slice end offsets (gotten from the slice lengths)
466  */
467  for (i = 0; i < 256; i++) {
468  bytestream2_put_byte(pb, lengths[i]);
469 
470  he[i].len = lengths[i];
471  he[i].sym = i;
472  }
473 
474  /* Calculate the huffman codes themselves */
475  calculate_codes(he);
476 
477  send = 0;
478  for (i = 0; i < c->slices; i++) {
479  sstart = send;
480  send = height * (i + 1) / c->slices & cmask;
481 
482  /*
483  * Write the huffman codes to a buffer,
484  * get the offset in bytes.
485  */
486  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
487  width * height + 4, width,
488  send - sstart, he);
489 
490  slice_len = offset - slice_len;
491 
492  /* Byteswap the written huffman codes */
493  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
494  (uint32_t *) c->slice_bits,
495  slice_len >> 2);
496 
497  /* Write the offset to the stream */
498  bytestream2_put_le32(pb, offset);
499 
500  /* Seek to the data part of the packet */
501  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
502  offset - slice_len, SEEK_CUR);
503 
504  /* Write the slices' data into the output packet */
505  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
506 
507  /* Seek back to the slice offsets */
508  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
509  SEEK_CUR);
510 
511  slice_len = offset;
512  }
513 
514  /* And at the end seek to the end of written slice(s) */
515  bytestream2_seek_p(pb, offset, SEEK_CUR);
516 
517  return 0;
518 }
519 
521  const AVFrame *pic, int *got_packet)
522 {
523  UtvideoContext *c = avctx->priv_data;
524  PutByteContext pb;
525 
526  uint32_t frame_info;
527 
528  uint8_t *dst;
529 
530  int width = avctx->width, height = avctx->height;
531  int i, ret = 0;
532 
533  /* Allocate a new packet if needed, and set it to the pointer dst */
534  ret = ff_alloc_packet(avctx, pkt, (256 + 4 * c->slices + width * height)
535  * c->planes + 4);
536 
537  if (ret < 0)
538  return ret;
539 
540  dst = pkt->data;
541 
542  bytestream2_init_writer(&pb, dst, pkt->size);
543 
544  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
545 
546  if (!c->slice_bits) {
547  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
548  return AVERROR(ENOMEM);
549  }
550 
551  /* In case of RGB, mangle the planes to Ut Video's format */
552  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
553  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
554  c->planes, pic->linesize, width, height);
555 
556  /* Deal with the planes */
557  switch (avctx->pix_fmt) {
558  case AV_PIX_FMT_GBRP:
559  case AV_PIX_FMT_GBRAP:
560  for (i = 0; i < c->planes; i++) {
561  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
562  c->slice_buffer[i], c->slice_stride, i,
563  width, height, &pb);
564 
565  if (ret) {
566  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
567  return ret;
568  }
569  }
570  break;
571  case AV_PIX_FMT_YUV444P:
572  for (i = 0; i < c->planes; i++) {
573  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
574  pic->linesize[i], i, width, height, &pb);
575 
576  if (ret) {
577  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
578  return ret;
579  }
580  }
581  break;
582  case AV_PIX_FMT_YUV422P:
583  for (i = 0; i < c->planes; i++) {
584  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
585  pic->linesize[i], i, width >> !!i, height, &pb);
586 
587  if (ret) {
588  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
589  return ret;
590  }
591  }
592  break;
593  case AV_PIX_FMT_YUV420P:
594  for (i = 0; i < c->planes; i++) {
595  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
596  pic->linesize[i], i, width >> !!i, height >> !!i,
597  &pb);
598 
599  if (ret) {
600  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
601  return ret;
602  }
603  }
604  break;
605  default:
606  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
607  avctx->pix_fmt);
608  return AVERROR_INVALIDDATA;
609  }
610 
611  /*
612  * Write frame information (LE 32-bit unsigned)
613  * into the output packet.
614  * Contains the prediction method.
615  */
616  frame_info = c->frame_pred << 8;
617  bytestream2_put_le32(&pb, frame_info);
618 
619  pkt->size = bytestream2_tell_p(&pb);
620 
621  /* Packet should be done */
622  *got_packet = 1;
623 
624  return 0;
625 }
626 
/* Offset of an option's backing field inside UtvideoContext */
#define OFFSET(x) offsetof(UtvideoContext, x)
/* Common AVOption flags: video + encoding parameter */
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options: the prediction method (default: left).
 * "gradient" is accepted by the option parser but rejected at init
 * time with an error — see the PRED_GRADIENT check in init. */
static const AVOption options[] = {
{ "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
    { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
    { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },

    { NULL},
};
638 
/* AVClass exposing the private options above through the AVOptions API */
static const AVClass utvideo_class = {
    .class_name = "utvideo",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
645 
647  .p.name = "utvideo",
648  CODEC_LONG_NAME("Ut Video"),
649  .p.type = AVMEDIA_TYPE_VIDEO,
650  .p.id = AV_CODEC_ID_UTVIDEO,
651  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
653  .priv_data_size = sizeof(UtvideoContext),
654  .p.priv_class = &utvideo_class,
655  .init = utvideo_encode_init,
657  .close = utvideo_encode_close,
658  .p.pix_fmts = (const enum AVPixelFormat[]) {
661  },
662  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
663 };
A
#define A(x)
Definition: vpx_arith.h:28
utvideo.h
bswapdsp.h
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
planes
static const struct @346 planes[]
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:89
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
HuffEntry::len
uint8_t len
Definition: exr.c:95
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
AVPacket::data
uint8_t * data
Definition: packet.h:374
count_usage
static void count_usage(uint8_t *src, int width, int height, uint64_t *counts)
Definition: utvideoenc.c:314
AVOption
AVOption.
Definition: opt.h:251
encode.h
b
#define b
Definition: input.c:41
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:197
FFCodec
Definition: codec_internal.h:127
utvideo_encode_frame
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
Definition: utvideoenc.c:520
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:374
write_huff_codes
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he)
Definition: utvideoenc.c:349
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:315
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
PRED_NONE
@ PRED_NONE
Definition: utvideo.h:38
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
width
#define width
intreadwrite.h
huff_cmp_sym
static int huff_cmp_sym(const void *a, const void *b)
Definition: utvideoenc.c:55
g
const char * g
Definition: vf_curves.c:127
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
HuffEntry::sym
uint16_t sym
Definition: exr.c:96
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:156
B
#define B
Definition: huffyuv.h:42
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:205
mangle_rgb_planes
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride, uint8_t *const src[4], int planes, const int stride[4], int width, int height)
Definition: utvideoenc.c:235
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
ff_utvideo_encoder
const FFCodec ff_utvideo_encoder
Definition: utvideoenc.c:646
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:107
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:60
mathops.h
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:41
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
utvideo_encode_init
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
Definition: utvideoenc.c:73
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:91
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
PutByteContext
Definition: bytestream.h:37
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
codec_internal.h
VE
#define VE
Definition: utvideoenc.c:628
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:40
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
utvideo_encode_close
static av_cold int utvideo_encode_close(AVCodecContext *avctx)
Definition: utvideoenc.c:61
UTVIDEO_422
@ UTVIDEO_422
Definition: utvideo.h:60
ut_huff_cmp_len
static int ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideoenc.c:48
UTVIDEO_420
@ UTVIDEO_420
Definition: utvideo.h:59
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1480
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:39
options
static const AVOption options[]
Definition: utvideoenc.c:629
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
UTVIDEO_RGBA
@ UTVIDEO_RGBA
Definition: utvideo.h:58
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
UtvideoContext
Definition: utvideo.h:64
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:598
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
avcodec.h
median_predict
static void median_predict(UtvideoContext *c, const uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: utvideoenc.c:280
stride
#define stride
Definition: h264pred_template.c:537
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
HuffEntry::code
uint32_t code
Definition: exr.c:97
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:426
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
huffman.h
HuffEntry
Definition: exr.c:94
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
encode_plane
static int encode_plane(AVCodecContext *avctx, const uint8_t *src, uint8_t *dst, ptrdiff_t stride, int plane_no, int width, int height, PutByteContext *pb)
Definition: utvideoenc.c:379
UTVIDEO_444
@ UTVIDEO_444
Definition: utvideo.h:61
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
utvideo_class
static const AVClass utvideo_class
Definition: utvideoenc.c:639
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:451
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1025
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
bytestream.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
UTVIDEO_RGB
@ UTVIDEO_RGB
Definition: utvideo.h:57
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:589
put_bits.h
COMP_HUFF
@ COMP_HUFF
Definition: utvideo.h:46
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:35
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
calculate_codes
static void calculate_codes(HuffEntry *he)
Definition: utvideoenc.c:328
OFFSET
#define OFFSET(x)
Definition: utvideoenc.c:627