FFmpeg
utvideoenc.c
Go to the documentation of this file.
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "put_bits.h"
36 #include "mathops.h"
37 #include "utvideo.h"
38 #include "huffman.h"
39 
/*
 * One entry of a plane's Huffman table: the byte value, its code length
 * and the canonical code derived from the lengths in calculate_codes().
 */
typedef struct HuffEntry {
    uint16_t sym;  /* byte value this entry encodes */
    uint8_t len;   /* code length in bits; 255 marks an unused symbol */
    uint32_t code; /* canonical Huffman code, right-aligned to len bits */
} HuffEntry;
45 
#if FF_API_PRIVATE_OPT
/*
 * Maps the deprecated avctx->prediction_method values (0-4, validated in
 * utvideo_encode_init) onto Ut Video's PRED_* constants.
 * NOTE(review): the five-entry initializer list was lost in extraction —
 * restore it from the upstream file before building; the empty braces
 * below do not match the declared size of 5.
 */
static const int ut_pred_order[5] = {
};
#endif
51 
52 /* Compare huffman tree nodes */
53 static int ut_huff_cmp_len(const void *a, const void *b)
54 {
55  const HuffEntry *aa = a, *bb = b;
56  return (aa->len - bb->len)*256 + aa->sym - bb->sym;
57 }
58 
59 /* Compare huffentry symbols */
60 static int huff_cmp_sym(const void *a, const void *b)
61 {
62  const HuffEntry *aa = a, *bb = b;
63  return aa->sym - bb->sym;
64 }
65 
67 {
68  UtvideoContext *c = avctx->priv_data;
69  int i;
70 
71  av_freep(&c->slice_bits);
72  for (i = 0; i < 4; i++)
73  av_freep(&c->slice_buffer[i]);
74 
75  return 0;
76 }
77 
79 {
80  UtvideoContext *c = avctx->priv_data;
81  int i, subsampled_height;
82  uint32_t original_format;
83 
84  c->avctx = avctx;
85  c->frame_info_size = 4;
86  c->slice_stride = FFALIGN(avctx->width, 32);
87 
88  switch (avctx->pix_fmt) {
89  case AV_PIX_FMT_GBRP:
90  c->planes = 3;
91  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
92  original_format = UTVIDEO_RGB;
93  break;
94  case AV_PIX_FMT_GBRAP:
95  c->planes = 4;
96  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
97  original_format = UTVIDEO_RGBA;
98  avctx->bits_per_coded_sample = 32;
99  break;
100  case AV_PIX_FMT_YUV420P:
101  if (avctx->width & 1 || avctx->height & 1) {
102  av_log(avctx, AV_LOG_ERROR,
103  "4:2:0 video requires even width and height.\n");
104  return AVERROR_INVALIDDATA;
105  }
106  c->planes = 3;
107  if (avctx->colorspace == AVCOL_SPC_BT709)
108  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
109  else
110  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
111  original_format = UTVIDEO_420;
112  break;
113  case AV_PIX_FMT_YUV422P:
114  if (avctx->width & 1) {
115  av_log(avctx, AV_LOG_ERROR,
116  "4:2:2 video requires even width.\n");
117  return AVERROR_INVALIDDATA;
118  }
119  c->planes = 3;
120  if (avctx->colorspace == AVCOL_SPC_BT709)
121  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
122  else
123  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
124  original_format = UTVIDEO_422;
125  break;
126  case AV_PIX_FMT_YUV444P:
127  c->planes = 3;
128  if (avctx->colorspace == AVCOL_SPC_BT709)
129  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
130  else
131  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
132  original_format = UTVIDEO_444;
133  break;
134  default:
135  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
136  avctx->pix_fmt);
137  return AVERROR_INVALIDDATA;
138  }
139 
140  ff_bswapdsp_init(&c->bdsp);
141  ff_llvidencdsp_init(&c->llvidencdsp);
142 
143 #if FF_API_PRIVATE_OPT
145  /* Check the prediction method, and error out if unsupported */
146  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
147  av_log(avctx, AV_LOG_WARNING,
148  "Prediction method %d is not supported in Ut Video.\n",
149  avctx->prediction_method);
151  }
152 
153  if (avctx->prediction_method == FF_PRED_PLANE) {
154  av_log(avctx, AV_LOG_ERROR,
155  "Plane prediction is not supported in Ut Video.\n");
157  }
158 
159  /* Convert from libavcodec prediction type to Ut Video's */
160  if (avctx->prediction_method)
161  c->frame_pred = ut_pred_order[avctx->prediction_method];
163 #endif
164 
165  if (c->frame_pred == PRED_GRADIENT) {
166  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
168  }
169 
170  /*
171  * Check the asked slice count for obviously invalid
172  * values (> 256 or negative).
173  */
174  if (avctx->slices > 256 || avctx->slices < 0) {
175  av_log(avctx, AV_LOG_ERROR,
176  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
177  avctx->slices);
178  return AVERROR(EINVAL);
179  }
180 
181  /* Check that the slice count is not larger than the subsampled height */
182  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
183  if (avctx->slices > subsampled_height) {
184  av_log(avctx, AV_LOG_ERROR,
185  "Slice count %d is larger than the subsampling-applied height %d.\n",
186  avctx->slices, subsampled_height);
187  return AVERROR(EINVAL);
188  }
189 
190  /* extradata size is 4 * 32 bits */
191  avctx->extradata_size = 16;
192 
193  avctx->extradata = av_mallocz(avctx->extradata_size +
195 
196  if (!avctx->extradata) {
197  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
198  utvideo_encode_close(avctx);
199  return AVERROR(ENOMEM);
200  }
201 
202  for (i = 0; i < c->planes; i++) {
203  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
205  if (!c->slice_buffer[i]) {
206  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
207  utvideo_encode_close(avctx);
208  return AVERROR(ENOMEM);
209  }
210  }
211 
212  /*
213  * Set the version of the encoder.
214  * Last byte is "implementation ID", which is
215  * obtained from the creator of the format.
216  * Libavcodec has been assigned with the ID 0xF0.
217  */
218  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
219 
220  /*
221  * Set the "original format"
222  * Not used for anything during decoding.
223  */
224  AV_WL32(avctx->extradata + 4, original_format);
225 
226  /* Write 4 as the 'frame info size' */
227  AV_WL32(avctx->extradata + 8, c->frame_info_size);
228 
229  /*
230  * Set how many slices are going to be used.
231  * By default uses multiple slices depending on the subsampled height.
232  * This enables multithreading in the official decoder.
233  */
234  if (!avctx->slices) {
235  c->slices = subsampled_height / 120;
236 
237  if (!c->slices)
238  c->slices = 1;
239  else if (c->slices > 256)
240  c->slices = 256;
241  } else {
242  c->slices = avctx->slices;
243  }
244 
245  /* Set compression mode */
246  c->compression = COMP_HUFF;
247 
248  /*
249  * Set the encoding flags:
250  * - Slice count minus 1
251  * - Interlaced encoding mode flag, set to zero for now.
252  * - Compression mode (none/huff)
253  * And write the flags.
254  */
255  c->flags = (c->slices - 1) << 24;
256  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
257  c->flags |= c->compression;
258 
259  AV_WL32(avctx->extradata + 12, c->flags);
260 
261  return 0;
262 }
263 
/*
 * Convert libavcodec GBR(A) planes into Ut Video's internal RGB layout:
 * G is stored as-is, while B and R are stored as differences against
 * G + 0x80.  Output rows are dst_stride apart and writing starts two rows
 * into each destination plane (the guard rows used by prediction).
 */
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
                              uint8_t *const src[4], int planes, const int stride[4],
                              int width, int height)
{
    const uint8_t *src_g = src[0];
    const uint8_t *src_b = src[1];
    const uint8_t *src_r = src[2];
    const uint8_t *src_a = src[3];
    int pos = 2 * dst_stride; /* skip the two guard rows */
    int x, y;

    for (y = 0; y < height; y++) {
        if (planes == 3) {
            for (x = 0; x < width; x++, pos++) {
                unsigned int green = src_g[x];

                dst[0][pos] = green;
                green      += 0x80;
                dst[1][pos] = src_b[x] - green;
                dst[2][pos] = src_r[x] - green;
            }
        } else {
            for (x = 0; x < width; x++, pos++) {
                unsigned int green = src_g[x];

                dst[0][pos] = green;
                green      += 0x80;
                dst[1][pos] = src_b[x] - green;
                dst[2][pos] = src_r[x] - green;
                dst[3][pos] = src_a[x];
            }
            src_a += stride[3];
        }
        /* Jump over the destination row padding */
        pos   += dst_stride - width;
        src_g += stride[0];
        src_b += stride[1];
        src_r += stride[2];
    }
}
304 
305 #undef A
306 #undef B
307 
308 /* Write data to a plane with median prediction */
310  ptrdiff_t stride, int width, int height)
311 {
312  int i, j;
313  int A, B;
314  uint8_t prev;
315 
316  /* First line uses left neighbour prediction */
317  prev = 0x80; /* Set the initial value */
318  for (i = 0; i < width; i++) {
319  *dst++ = src[i] - prev;
320  prev = src[i];
321  }
322 
323  if (height == 1)
324  return;
325 
326  src += stride;
327 
328  /*
329  * Second line uses top prediction for the first sample,
330  * and median for the rest.
331  */
332  A = B = 0;
333 
334  /* Rest of the coded part uses median prediction */
335  for (j = 1; j < height; j++) {
336  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
337  dst += width;
338  src += stride;
339  }
340 }
341 
/*
 * Build a 256-bin histogram of the byte values in a tightly packed
 * (stride == width) plane; counts accumulates across calls.
 */
static void count_usage(uint8_t *src, int width,
                        int height, uint64_t *counts)
{
    int x, y;

    for (y = 0; y < height; y++, src += width)
        for (x = 0; x < width; x++)
            counts[src[x]]++;
}
355 
356 /* Calculate the actual huffman codes from the code lengths */
357 static void calculate_codes(HuffEntry *he)
358 {
359  int last, i;
360  uint32_t code;
361 
362  qsort(he, 256, sizeof(*he), ut_huff_cmp_len);
363 
364  last = 255;
365  while (he[last].len == 255 && last)
366  last--;
367 
368  code = 0;
369  for (i = last; i >= 0; i--) {
370  he[i].code = code >> (32 - he[i].len);
371  code += 0x80000000u >> (he[i].len - 1);
372  }
373 
374  qsort(he, 256, sizeof(*he), huff_cmp_sym);
375 }
376 
377 /* Write huffman bit codes to a memory block */
378 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
379  int width, int height, HuffEntry *he)
380 {
381  PutBitContext pb;
382  int i, j;
383  int count;
384 
385  init_put_bits(&pb, dst, dst_size);
386 
387  /* Write the codes */
388  for (j = 0; j < height; j++) {
389  for (i = 0; i < width; i++)
390  put_bits(&pb, he[src[i]].len, he[src[i]].code);
391 
392  src += width;
393  }
394 
395  /* Pad output to a 32-bit boundary */
396  count = put_bits_count(&pb) & 0x1F;
397 
398  if (count)
399  put_bits(&pb, 32 - count, 0);
400 
401  /* Flush the rest with zeroes */
402  flush_put_bits(&pb);
403 
404  /* Return the amount of bytes written */
405  return put_bytes_output(&pb);
406 }
407 
409  uint8_t *dst, ptrdiff_t stride, int plane_no,
410  int width, int height, PutByteContext *pb)
411 {
412  UtvideoContext *c = avctx->priv_data;
413  uint8_t lengths[256];
414  uint64_t counts[256] = { 0 };
415 
416  HuffEntry he[256];
417 
418  uint32_t offset = 0, slice_len = 0;
419  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
420  int i, sstart, send = 0;
421  int symbol;
422  int ret;
423 
424  /* Do prediction / make planes */
425  switch (c->frame_pred) {
426  case PRED_NONE:
427  for (i = 0; i < c->slices; i++) {
428  sstart = send;
429  send = height * (i + 1) / c->slices & cmask;
430  av_image_copy_plane(dst + sstart * width, width,
431  src + sstart * stride, stride,
432  width, send - sstart);
433  }
434  break;
435  case PRED_LEFT:
436  for (i = 0; i < c->slices; i++) {
437  sstart = send;
438  send = height * (i + 1) / c->slices & cmask;
439  c->llvidencdsp.sub_left_predict(dst + sstart * width, src + sstart * stride, stride, width, send - sstart);
440  }
441  break;
442  case PRED_MEDIAN:
443  for (i = 0; i < c->slices; i++) {
444  sstart = send;
445  send = height * (i + 1) / c->slices & cmask;
446  median_predict(c, src + sstart * stride, dst + sstart * width,
447  stride, width, send - sstart);
448  }
449  break;
450  default:
451  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
452  c->frame_pred);
454  }
455 
456  /* Count the usage of values */
457  count_usage(dst, width, height, counts);
458 
459  /* Check for a special case where only one symbol was used */
460  for (symbol = 0; symbol < 256; symbol++) {
461  /* If non-zero count is found, see if it matches width * height */
462  if (counts[symbol]) {
463  /* Special case if only one symbol was used */
464  if (counts[symbol] == width * (int64_t)height) {
465  /*
466  * Write a zero for the single symbol
467  * used in the plane, else 0xFF.
468  */
469  for (i = 0; i < 256; i++) {
470  if (i == symbol)
471  bytestream2_put_byte(pb, 0);
472  else
473  bytestream2_put_byte(pb, 0xFF);
474  }
475 
476  /* Write zeroes for lengths */
477  for (i = 0; i < c->slices; i++)
478  bytestream2_put_le32(pb, 0);
479 
480  /* And that's all for that plane folks */
481  return 0;
482  }
483  break;
484  }
485  }
486 
487  /* Calculate huffman lengths */
488  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
489  return ret;
490 
491  /*
492  * Write the plane's header into the output packet:
493  * - huffman code lengths (256 bytes)
494  * - slice end offsets (gotten from the slice lengths)
495  */
496  for (i = 0; i < 256; i++) {
497  bytestream2_put_byte(pb, lengths[i]);
498 
499  he[i].len = lengths[i];
500  he[i].sym = i;
501  }
502 
503  /* Calculate the huffman codes themselves */
504  calculate_codes(he);
505 
506  send = 0;
507  for (i = 0; i < c->slices; i++) {
508  sstart = send;
509  send = height * (i + 1) / c->slices & cmask;
510 
511  /*
512  * Write the huffman codes to a buffer,
513  * get the offset in bytes.
514  */
515  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
516  width * height + 4, width,
517  send - sstart, he);
518 
519  slice_len = offset - slice_len;
520 
521  /* Byteswap the written huffman codes */
522  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
523  (uint32_t *) c->slice_bits,
524  slice_len >> 2);
525 
526  /* Write the offset to the stream */
527  bytestream2_put_le32(pb, offset);
528 
529  /* Seek to the data part of the packet */
530  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
531  offset - slice_len, SEEK_CUR);
532 
533  /* Write the slices' data into the output packet */
534  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
535 
536  /* Seek back to the slice offsets */
537  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
538  SEEK_CUR);
539 
540  slice_len = offset;
541  }
542 
543  /* And at the end seek to the end of written slice(s) */
544  bytestream2_seek_p(pb, offset, SEEK_CUR);
545 
546  return 0;
547 }
548 
550  const AVFrame *pic, int *got_packet)
551 {
552  UtvideoContext *c = avctx->priv_data;
553  PutByteContext pb;
554 
555  uint32_t frame_info;
556 
557  uint8_t *dst;
558 
559  int width = avctx->width, height = avctx->height;
560  int i, ret = 0;
561 
562  /* Allocate a new packet if needed, and set it to the pointer dst */
563  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
564  c->planes + 4, 0);
565 
566  if (ret < 0)
567  return ret;
568 
569  dst = pkt->data;
570 
571  bytestream2_init_writer(&pb, dst, pkt->size);
572 
573  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
574 
575  if (!c->slice_bits) {
576  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
577  return AVERROR(ENOMEM);
578  }
579 
580  /* In case of RGB, mangle the planes to Ut Video's format */
581  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP || avctx->pix_fmt == AV_PIX_FMT_GBRP)
582  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data,
583  c->planes, pic->linesize, width, height);
584 
585  /* Deal with the planes */
586  switch (avctx->pix_fmt) {
587  case AV_PIX_FMT_GBRP:
588  case AV_PIX_FMT_GBRAP:
589  for (i = 0; i < c->planes; i++) {
590  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
591  c->slice_buffer[i], c->slice_stride, i,
592  width, height, &pb);
593 
594  if (ret) {
595  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
596  return ret;
597  }
598  }
599  break;
600  case AV_PIX_FMT_YUV444P:
601  for (i = 0; i < c->planes; i++) {
602  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
603  pic->linesize[i], i, width, height, &pb);
604 
605  if (ret) {
606  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
607  return ret;
608  }
609  }
610  break;
611  case AV_PIX_FMT_YUV422P:
612  for (i = 0; i < c->planes; i++) {
613  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
614  pic->linesize[i], i, width >> !!i, height, &pb);
615 
616  if (ret) {
617  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
618  return ret;
619  }
620  }
621  break;
622  case AV_PIX_FMT_YUV420P:
623  for (i = 0; i < c->planes; i++) {
624  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
625  pic->linesize[i], i, width >> !!i, height >> !!i,
626  &pb);
627 
628  if (ret) {
629  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
630  return ret;
631  }
632  }
633  break;
634  default:
635  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
636  avctx->pix_fmt);
637  return AVERROR_INVALIDDATA;
638  }
639 
640  /*
641  * Write frame information (LE 32-bit unsigned)
642  * into the output packet.
643  * Contains the prediction method.
644  */
645  frame_info = c->frame_pred << 8;
646  bytestream2_put_le32(&pb, frame_info);
647 
648  /*
649  * At least currently Ut Video is IDR only.
650  * Set flags accordingly.
651  */
652 #if FF_API_CODED_FRAME
654  avctx->coded_frame->key_frame = 1;
657 #endif
658 
659  pkt->size = bytestream2_tell_p(&pb);
661 
662  /* Packet should be done */
663  *got_packet = 1;
664 
665  return 0;
666 }
667 
668 #define OFFSET(x) offsetof(UtvideoContext, x)
669 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private encoder options: prediction method, defaulting to left
 * prediction.  Gradient is listed but rejected at init time. */
static const AVOption options[] = {
    { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
    { "none",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE },     INT_MIN, INT_MAX, VE, "pred" },
    { "left",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT },     INT_MIN, INT_MAX, VE, "pred" },
    { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
    { "median",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN },   INT_MIN, INT_MAX, VE, "pred" },

    { NULL},
};
679 
/* AVClass exposing the private options above to the AVOption system. */
static const AVClass utvideo_class = {
    .class_name = "utvideo",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
686 
688  .name = "utvideo",
689  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
690  .type = AVMEDIA_TYPE_VIDEO,
691  .id = AV_CODEC_ID_UTVIDEO,
692  .priv_data_size = sizeof(UtvideoContext),
693  .priv_class = &utvideo_class,
695  .encode2 = utvideo_encode_frame,
696  .close = utvideo_encode_close,
697  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
698  .pix_fmts = (const enum AVPixelFormat[]) {
701  },
702 };
utvideo.h
AVCodec
AVCodec.
Definition: codec.h:197
bswapdsp.h
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
stride
int stride
Definition: mace.c:144
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
UTVIDEO_422
@ UTVIDEO_422
Definition: utvideo.h:60
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:40
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1164
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:93
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
HuffEntry::len
uint8_t len
Definition: exr.c:94
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:478
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:66
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:324
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:369
count_usage
static void count_usage(uint8_t *src, int width, int height, uint64_t *counts)
Definition: utvideoenc.c:343
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:197
utvideo_encode_frame
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
Definition: utvideoenc.c:549
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
UTVIDEO_RGBA
@ UTVIDEO_RGBA
Definition: utvideo.h:58
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:373
A
#define A(x)
Definition: vp56_arith.h:28
AVCodecContext::prediction_method
attribute_deprecated int prediction_method
Definition: avcodec.h:895
write_huff_codes
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he)
Definition: utvideoenc.c:378
UTVIDEO_420
@ UTVIDEO_420
Definition: utvideo.h:59
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:402
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_cold
#define av_cold
Definition: attributes.h:90
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:39
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:638
width
#define width
intreadwrite.h
huff_cmp_sym
static int huff_cmp_sym(const void *a, const void *b)
Definition: utvideoenc.c:60
COMP_HUFF
@ COMP_HUFF
Definition: utvideo.h:46
g
const char * g
Definition: vf_curves.c:117
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
HuffEntry::sym
uint16_t sym
Definition: exr.c:95
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:202
mangle_rgb_planes
static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride, uint8_t *const src[4], int planes, const int stride[4], int width, int height)
Definition: utvideoenc.c:264
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:51
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:108
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:255
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:58
mathops.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
utvideo_encode_init
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
Definition: utvideoenc.c:78
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:91
UTVIDEO_RGB
@ UTVIDEO_RGB
Definition: utvideo.h:57
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
PutByteContext
Definition: bytestream.h:37
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:407
AVPacket::size
int size
Definition: packet.h:370
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
VE
#define VE
Definition: utvideoenc.c:669
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
median_predict
static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int width, int height)
Definition: utvideoenc.c:309
utvideo_encode_close
static av_cold int utvideo_encode_close(AVCodecContext *avctx)
Definition: utvideoenc.c:66
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
ut_huff_cmp_len
static int ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideoenc.c:53
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1740
i
int i
Definition: input.c:407
options
static const AVOption options[]
Definition: utvideoenc.c:670
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:84
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
UTVIDEO_444
@ UTVIDEO_444
Definition: utvideo.h:61
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
uint8_t
uint8_t
Definition: audio_convert.c:194
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
UtvideoContext
Definition: utvideo.h:64
len
int len
Definition: vorbis_enc_data.h:452
encode_plane
static int encode_plane(AVCodecContext *avctx, uint8_t *src, uint8_t *dst, ptrdiff_t stride, int plane_no, int width, int height, PutByteContext *pb)
Definition: utvideoenc.c:408
AVCodecContext::height
int height
Definition: avcodec.h:709
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
PRED_NONE
@ PRED_NONE
Definition: utvideo.h:38
avcodec.h
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
HuffEntry::code
uint32_t code
Definition: exr.c:96
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
ut_pred_order
static const int ut_pred_order[5]
Definition: utvideoenc.c:47
B
#define B
Definition: huffyuvdsp.h:32
AVCodecContext::coded_frame
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:1764
AVCodecContext
main external API structure.
Definition: avcodec.h:536
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
FF_PRED_PLANE
#define FF_PRED_PLANE
Definition: avcodec.h:897
huffman.h
HuffEntry
Definition: exr.c:93
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
planes
static const struct @322 planes[]
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:41
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
utvideo_class
static const AVClass utvideo_class
Definition: utvideoenc.c:680
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:147
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:561
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1187
AVPacket
This structure stores compressed data.
Definition: packet.h:346
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:563
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:709
bytestream.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:355
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
ff_utvideo_encoder
AVCodec ff_utvideo_encoder
Definition: utvideoenc.c:687
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
calculate_codes
static void calculate_codes(HuffEntry *he)
Definition: utvideoenc.c:357
OFFSET
#define OFFSET(x)
Definition: utvideoenc.c:668