utvideoenc.c
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "put_bits.h"
36 #include "mathops.h"
37 #include "utvideo.h"
38 #include "huffman.h"
39 
40 /* Compare huffentry symbols */
41 static int huff_cmp_sym(const void *a, const void *b)
42 {
43  const HuffEntry *aa = a, *bb = b;
44  return aa->sym - bb->sym;
45 }
46 
47 static av_cold int utvideo_encode_close(AVCodecContext *avctx)
48 {
49  UtvideoContext *c = avctx->priv_data;
50  int i;
51 
52  av_freep(&c->slice_bits);
53  for (i = 0; i < 4; i++)
54  av_freep(&c->slice_buffer[i]);
55 
56  return 0;
57 }
58 
59 static av_cold int utvideo_encode_init(AVCodecContext *avctx)
60 {
61  UtvideoContext *c = avctx->priv_data;
62  int i, subsampled_height;
63  uint32_t original_format;
64 
65  c->avctx = avctx;
66  c->frame_info_size = 4;
67  c->slice_stride = FFALIGN(avctx->width, 32);
68 
69  switch (avctx->pix_fmt) {
70  case AV_PIX_FMT_RGB24:
71  c->planes = 3;
72  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
73  original_format = UTVIDEO_RGB;
74  break;
75  case AV_PIX_FMT_RGBA:
76  c->planes = 4;
77  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
78  original_format = UTVIDEO_RGBA;
79  avctx->bits_per_coded_sample = 32;
80  break;
81  case AV_PIX_FMT_YUV420P:
82  if (avctx->width & 1 || avctx->height & 1) {
83  av_log(avctx, AV_LOG_ERROR,
84  "4:2:0 video requires even width and height.\n");
85  return AVERROR_INVALIDDATA;
86  }
87  c->planes = 3;
88  if (avctx->colorspace == AVCOL_SPC_BT709)
89  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
90  else
91  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
92  original_format = UTVIDEO_420;
93  break;
94  case AV_PIX_FMT_YUV422P:
95  if (avctx->width & 1) {
96  av_log(avctx, AV_LOG_ERROR,
97  "4:2:2 video requires even width.\n");
98  return AVERROR_INVALIDDATA;
99  }
100  c->planes = 3;
101  if (avctx->colorspace == AVCOL_SPC_BT709)
102  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
103  else
104  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
105  original_format = UTVIDEO_422;
106  break;
107  case AV_PIX_FMT_YUV444P:
108  c->planes = 3;
109  if (avctx->colorspace == AVCOL_SPC_BT709)
110  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
111  else
112  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
113  original_format = UTVIDEO_444;
114  break;
115  default:
116  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
117  avctx->pix_fmt);
118  return AVERROR_INVALIDDATA;
119  }
120 
121  ff_bswapdsp_init(&c->bdsp);
122  ff_llvidencdsp_init(&c->llvidencdsp);
123 
124 #if FF_API_PRIVATE_OPT
125 FF_DISABLE_DEPRECATION_WARNINGS
126  /* Check the prediction method, and error out if unsupported */
127  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
128  av_log(avctx, AV_LOG_WARNING,
129  "Prediction method %d is not supported in Ut Video.\n",
130  avctx->prediction_method);
131  return AVERROR_OPTION_NOT_FOUND;
132  }
133 
134  if (avctx->prediction_method == FF_PRED_PLANE) {
135  av_log(avctx, AV_LOG_ERROR,
136  "Plane prediction is not supported in Ut Video.\n");
137  return AVERROR_OPTION_NOT_FOUND;
138  }
139 
140  /* Convert from libavcodec prediction type to Ut Video's */
141  if (avctx->prediction_method)
142  c->frame_pred = ff_ut_pred_order[avctx->prediction_method];
143 FF_ENABLE_DEPRECATION_WARNINGS
144 #endif
145 
146  if (c->frame_pred == PRED_GRADIENT) {
147  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
148  return AVERROR_OPTION_NOT_FOUND;
149  }
150 
151  /*
152  * Check the requested slice count for obviously invalid
153  * values (> 256 or negative).
154  */
155  if (avctx->slices > 256 || avctx->slices < 0) {
156  av_log(avctx, AV_LOG_ERROR,
157  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
158  avctx->slices);
159  return AVERROR(EINVAL);
160  }
161 
162  /* Check that the slice count is not larger than the subsampled height */
163  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
164  if (avctx->slices > subsampled_height) {
165  av_log(avctx, AV_LOG_ERROR,
166  "Slice count %d is larger than the subsampling-applied height %d.\n",
167  avctx->slices, subsampled_height);
168  return AVERROR(EINVAL);
169  }
170 
171  /* extradata size is 4 * 32 bits */
172  avctx->extradata_size = 16;
173 
174  avctx->extradata = av_mallocz(avctx->extradata_size +
175  AV_INPUT_BUFFER_PADDING_SIZE);
176 
177  if (!avctx->extradata) {
178  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
179  utvideo_encode_close(avctx);
180  return AVERROR(ENOMEM);
181  }
182 
183  for (i = 0; i < c->planes; i++) {
184  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
185  AV_INPUT_BUFFER_PADDING_SIZE);
186  if (!c->slice_buffer[i]) {
187  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
188  utvideo_encode_close(avctx);
189  return AVERROR(ENOMEM);
190  }
191  }
192 
193  /*
194  * Set the version of the encoder.
195  * Last byte is "implementation ID", which is
196  * obtained from the creator of the format.
197  * Libavcodec has been assigned the ID 0xF0.
198  */
199  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
200 
201  /*
202  * Set the "original format"
203  * Not used for anything during decoding.
204  */
205  AV_WL32(avctx->extradata + 4, original_format);
206 
207  /* Write 4 as the 'frame info size' */
208  AV_WL32(avctx->extradata + 8, c->frame_info_size);
209 
210  /*
211  * Set how many slices are going to be used.
212  * By default uses multiple slices depending on the subsampled height.
213  * This enables multithreading in the official decoder.
214  */
215  if (!avctx->slices) {
216  c->slices = subsampled_height / 120;
217 
218  if (!c->slices)
219  c->slices = 1;
220  else if (c->slices > 256)
221  c->slices = 256;
222  } else {
223  c->slices = avctx->slices;
224  }
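 /*
  * For example (illustrative numbers, not from the original source):
  * 1080-line 4:2:0 input has a subsampled height of 540, so the
  * default becomes 540 / 120 = 4 slices; inputs with fewer than 120
  * subsampled lines fall back to a single slice.
  */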
225 
226  /* Set compression mode */
227  c->compression = COMP_HUFF;
228 
229  /*
230  * Set the encoding flags:
231  * - Slice count minus 1
232  * - Interlaced encoding mode flag, set to zero for now.
233  * - Compression mode (none/huff)
234  * And write the flags.
235  */
236  c->flags = (c->slices - 1) << 24;
237  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
238  c->flags |= c->compression;
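 /*
  * Example (illustrative, not part of the original source): with the
  * default Huffman compression and, say, 4 slices this yields
  * flags = (4 - 1) << 24 | COMP_HUFF, i.e. the slice count sits in
  * the top byte and the compression mode in the low bits.
  */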
239 
240  AV_WL32(avctx->extradata + 12, c->flags);
241 
242  return 0;
243 }
244 
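 /*
  * Split packed RGB(A) into planes: plane 0 holds green unchanged,
  * planes 1 and 2 hold blue and red with green subtracted and a 0x80
  * bias applied (so equal samples map to mid-grey), and plane 3
  * (RGBA only) holds alpha. Writing starts two destination lines in
  * (k = 2 * dst_stride), matching the slice_buffer + 2 * slice_stride
  * source offset used by the caller in utvideo_encode_frame().
  */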
245 static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
246  int step, int stride, int width, int height)
247 {
248  int i, j;
249  int k = 2 * dst_stride;
250  unsigned int g;
251 
252  for (j = 0; j < height; j++) {
253  if (step == 3) {
254  for (i = 0; i < width * step; i += step) {
255  g = src[i + 1];
256  dst[0][k] = g;
257  g += 0x80;
258  dst[1][k] = src[i + 2] - g;
259  dst[2][k] = src[i + 0] - g;
260  k++;
261  }
262  } else {
263  for (i = 0; i < width * step; i += step) {
264  g = src[i + 1];
265  dst[0][k] = g;
266  g += 0x80;
267  dst[1][k] = src[i + 2] - g;
268  dst[2][k] = src[i + 0] - g;
269  dst[3][k] = src[i + 3];
270  k++;
271  }
272  }
273  k += dst_stride - width;
274  src += stride;
275  }
276 }
277 
278 /* Write data to a plane with left prediction */
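 /*
  * Each output byte is the difference to the previous source byte,
  * seeded with 0x80; e.g. (illustrative) a first row of
  * 0x80 0x82 0x82 is stored as 0x00 0x02 0x00. Later rows continue
  * predicting from the last sample of the previous row.
  */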
279 static void left_predict(uint8_t *src, uint8_t *dst, int stride,
280  int width, int height)
281 {
282  int i, j;
283  uint8_t prev;
284 
285  prev = 0x80; /* Set the initial value */
286  for (j = 0; j < height; j++) {
287  for (i = 0; i < width; i++) {
288  *dst++ = src[i] - prev;
289  prev = src[i];
290  }
291  src += stride;
292  }
293 }
294 
295 #undef A
296 #undef B
297 
298 /* Write data to a plane with median prediction */
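 /*
  * After the left-predicted first line, sub_median_pred() subtracts
  * HuffYUV's median predictor (the median of left, top and
  * left + top - topleft) from every sample; A and B carry the running
  * left / top-left values across rows.
  */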
299 static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride,
300  int width, int height)
301 {
302  int i, j;
303  int A, B;
304  uint8_t prev;
305 
306  /* First line uses left neighbour prediction */
307  prev = 0x80; /* Set the initial value */
308  for (i = 0; i < width; i++) {
309  *dst++ = src[i] - prev;
310  prev = src[i];
311  }
312 
313  if (height == 1)
314  return;
315 
316  src += stride;
317 
318  /*
319  * Second line uses top prediction for the first sample,
320  * and median for the rest.
321  */
322  A = B = 0;
323 
324  /* Rest of the coded part uses median prediction */
325  for (j = 1; j < height; j++) {
326  c->llvidencdsp.sub_median_pred(dst, src - stride, src, width, &A, &B);
327  dst += width;
328  src += stride;
329  }
330 }
331 
332 /* Count the usage of values in a plane */
333 static void count_usage(uint8_t *src, int width,
334  int height, uint64_t *counts)
335 {
336  int i, j;
337 
338  for (j = 0; j < height; j++) {
339  for (i = 0; i < width; i++) {
340  counts[src[i]]++;
341  }
342  src += width;
343  }
344 }
345 
346 /* Calculate the actual huffman codes from the code lengths */
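 /*
  * Canonical code assignment: entries are sorted by ascending code
  * length (length 255 marks unused symbols), codes are handed out
  * walking from the longest code towards the shortest while a 32-bit
  * running value advances by 2^(32 - len) per symbol, and the table
  * is finally re-sorted by symbol value so it can be indexed directly
  * while encoding.
  */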
347 static void calculate_codes(HuffEntry *he)
348 {
349  int last, i;
350  uint32_t code;
351 
352  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
353 
354  last = 255;
355  while (he[last].len == 255 && last)
356  last--;
357 
358  code = 1;
359  for (i = last; i >= 0; i--) {
360  he[i].code = code >> (32 - he[i].len);
361  code += 0x80000000u >> (he[i].len - 1);
362  }
363 
364  qsort(he, 256, sizeof(*he), huff_cmp_sym);
365 }
366 
367 /* Write huffman bit codes to a memory block */
368 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
369  int width, int height, HuffEntry *he)
370 {
371  PutBitContext pb;
372  int i, j;
373  int count;
374 
375  init_put_bits(&pb, dst, dst_size);
376 
377  /* Write the codes */
378  for (j = 0; j < height; j++) {
379  for (i = 0; i < width; i++)
380  put_bits(&pb, he[src[i]].len, he[src[i]].code);
381 
382  src += width;
383  }
384 
385  /* Pad output to a 32-bit boundary */
386  count = put_bits_count(&pb) & 0x1F;
387 
388  if (count)
389  put_bits(&pb, 32 - count, 0);
390 
391  /* Get the amount of bits written */
392  count = put_bits_count(&pb);
393 
394  /* Flush the rest with zeroes */
395  flush_put_bits(&pb);
396 
397  return count;
398 }
399 
400 static int encode_plane(AVCodecContext *avctx, uint8_t *src,
401  uint8_t *dst, int stride, int plane_no,
402  int width, int height, PutByteContext *pb)
403 {
404  UtvideoContext *c = avctx->priv_data;
405  uint8_t lengths[256];
406  uint64_t counts[256] = { 0 };
407 
408  HuffEntry he[256];
409 
410  uint32_t offset = 0, slice_len = 0;
411  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
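 /* For the luma plane of 4:2:0 input cmask is ~1, rounding every slice
  * boundary down to an even line so that luma slices stay in step with
  * the subsampled chroma planes; otherwise it is ~0 (no masking). */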
412  int i, sstart, send = 0;
413  int symbol;
414  int ret;
415 
416  /* Do prediction / make planes */
417  switch (c->frame_pred) {
418  case PRED_NONE:
419  for (i = 0; i < c->slices; i++) {
420  sstart = send;
421  send = height * (i + 1) / c->slices & cmask;
422  av_image_copy_plane(dst + sstart * width, width,
423  src + sstart * stride, stride,
424  width, send - sstart);
425  }
426  break;
427  case PRED_LEFT:
428  for (i = 0; i < c->slices; i++) {
429  sstart = send;
430  send = height * (i + 1) / c->slices & cmask;
431  left_predict(src + sstart * stride, dst + sstart * width,
432  stride, width, send - sstart);
433  }
434  break;
435  case PRED_MEDIAN:
436  for (i = 0; i < c->slices; i++) {
437  sstart = send;
438  send = height * (i + 1) / c->slices & cmask;
439  median_predict(c, src + sstart * stride, dst + sstart * width,
440  stride, width, send - sstart);
441  }
442  break;
443  default:
444  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
445  c->frame_pred);
446  return AVERROR_OPTION_NOT_FOUND;
447  }
448 
449  /* Count the usage of values */
450  count_usage(dst, width, height, counts);
451 
452  /* Check for a special case where only one symbol was used */
453  for (symbol = 0; symbol < 256; symbol++) {
454  /* If non-zero count is found, see if it matches width * height */
455  if (counts[symbol]) {
456  /* Special case if only one symbol was used */
457  if (counts[symbol] == width * (int64_t)height) {
458  /*
459  * Write a zero for the single symbol
460  * used in the plane, else 0xFF.
461  */
462  for (i = 0; i < 256; i++) {
463  if (i == symbol)
464  bytestream2_put_byte(pb, 0);
465  else
466  bytestream2_put_byte(pb, 0xFF);
467  }
468 
469  /* Write zeroes for lengths */
470  for (i = 0; i < c->slices; i++)
471  bytestream2_put_le32(pb, 0);
472 
473  /* And that's all for that plane folks */
474  return 0;
475  }
476  break;
477  }
478  }
479 
480  /* Calculate huffman lengths */
481  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
482  return ret;
483 
484  /*
485  * Write the plane's header into the output packet:
486  * - huffman code lengths (256 bytes)
487  * - slice end offsets (gotten from the slice lengths)
488  */
489  for (i = 0; i < 256; i++) {
490  bytestream2_put_byte(pb, lengths[i]);
491 
492  he[i].len = lengths[i];
493  he[i].sym = i;
494  }
495 
496  /* Calculate the huffman codes themselves */
497  calculate_codes(he);
498 
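 /*
  * Each plane is laid out as: 256 code-length bytes (written above),
  * then one little-endian 32-bit end offset per slice, then the
  * byteswapped Huffman bitstream of all slices. The loop below builds
  * this by writing each slice's end offset, seeking forward past the
  * remaining offset slots to append the slice data, and seeking back
  * for the next offset.
  */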
499  send = 0;
500  for (i = 0; i < c->slices; i++) {
501  sstart = send;
502  send = height * (i + 1) / c->slices & cmask;
503 
504  /*
505  * Write the huffman codes to a buffer,
506  * get the offset in bits and convert to bytes.
507  */
508  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
509  width * height + 4, width,
510  send - sstart, he) >> 3;
511 
512  slice_len = offset - slice_len;
513 
514  /* Byteswap the written huffman codes */
515  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
516  (uint32_t *) c->slice_bits,
517  slice_len >> 2);
518 
519  /* Write the offset to the stream */
520  bytestream2_put_le32(pb, offset);
521 
522  /* Seek to the data part of the packet */
523  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
524  offset - slice_len, SEEK_CUR);
525 
526  /* Write the slices' data into the output packet */
527  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
528 
529  /* Seek back to the slice offsets */
530  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
531  SEEK_CUR);
532 
533  slice_len = offset;
534  }
535 
536  /* And at the end seek to the end of written slice(s) */
537  bytestream2_seek_p(pb, offset, SEEK_CUR);
538 
539  return 0;
540 }
541 
542 static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
543  const AVFrame *pic, int *got_packet)
544 {
545  UtvideoContext *c = avctx->priv_data;
546  PutByteContext pb;
547 
548  uint32_t frame_info;
549 
550  uint8_t *dst;
551 
552  int width = avctx->width, height = avctx->height;
553  int i, ret = 0;
554 
555  /* Allocate a new packet if needed, and set it to the pointer dst */
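 /*
  * The allocation budgets, per plane, 256 bytes of code lengths, a
  * 4-byte end offset per slice and width * height bytes of coded
  * data, plus 4 trailing bytes for the frame information field.
  */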
556  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
557  c->planes + 4, 0);
558 
559  if (ret < 0)
560  return ret;
561 
562  dst = pkt->data;
563 
564  bytestream2_init_writer(&pb, dst, pkt->size);
565 
566  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
567 
568  if (!c->slice_bits) {
569  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
570  return AVERROR(ENOMEM);
571  }
572 
573  /* In case of RGB, mangle the planes to Ut Video's format */
574  if (avctx->pix_fmt == AV_PIX_FMT_RGBA || avctx->pix_fmt == AV_PIX_FMT_RGB24)
575  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data[0],
576  c->planes, pic->linesize[0], width, height);
577 
578  /* Deal with the planes */
579  switch (avctx->pix_fmt) {
580  case AV_PIX_FMT_RGB24:
581  case AV_PIX_FMT_RGBA:
582  for (i = 0; i < c->planes; i++) {
583  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
584  c->slice_buffer[i], c->slice_stride, i,
585  width, height, &pb);
586 
587  if (ret) {
588  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
589  return ret;
590  }
591  }
592  break;
593  case AV_PIX_FMT_YUV444P:
594  for (i = 0; i < c->planes; i++) {
595  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
596  pic->linesize[i], i, width, height, &pb);
597 
598  if (ret) {
599  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
600  return ret;
601  }
602  }
603  break;
604  case AV_PIX_FMT_YUV422P:
605  for (i = 0; i < c->planes; i++) {
606  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
607  pic->linesize[i], i, width >> !!i, height, &pb);
608 
609  if (ret) {
610  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
611  return ret;
612  }
613  }
614  break;
615  case AV_PIX_FMT_YUV420P:
616  for (i = 0; i < c->planes; i++) {
617  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
618  pic->linesize[i], i, width >> !!i, height >> !!i,
619  &pb);
620 
621  if (ret) {
622  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
623  return ret;
624  }
625  }
626  break;
627  default:
628  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
629  avctx->pix_fmt);
630  return AVERROR_INVALIDDATA;
631  }
632 
633  /*
634  * Write frame information (LE 32-bit unsigned)
635  * into the output packet.
636  * Contains the prediction method.
637  */
638  frame_info = c->frame_pred << 8;
639  bytestream2_put_le32(&pb, frame_info);
640 
641  /*
642  * At least currently Ut Video is IDR only.
643  * Set flags accordingly.
644  */
645 #if FF_API_CODED_FRAME
646 FF_DISABLE_DEPRECATION_WARNINGS
647  avctx->coded_frame->key_frame = 1;
648  avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
649 FF_ENABLE_DEPRECATION_WARNINGS
650 #endif
651 
652  pkt->size = bytestream2_tell_p(&pb);
653  pkt->flags |= AV_PKT_FLAG_KEY;
654 
655  /* Packet should be done */
656  *got_packet = 1;
657 
658  return 0;
659 }
660 
661 #define OFFSET(x) offsetof(UtvideoContext, x)
662 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
663 static const AVOption options[] = {
664 { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
665  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
666  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
667  { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
668  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },
669 
670  { NULL},
671 };
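 /*
  * The prediction method can be chosen per encode, for instance
  * (illustrative command line):
  *   ffmpeg -i input.avi -c:v utvideo -pred median output.avi
  * "left" is the default; "gradient" is rejected at init time.
  */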
672 
673 static const AVClass utvideo_class = {
674  .class_name = "utvideo",
675  .item_name = av_default_item_name,
676  .option = options,
677  .version = LIBAVUTIL_VERSION_INT,
678 };
679 
680 AVCodec ff_utvideo_encoder = {
681  .name = "utvideo",
682  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
683  .type = AVMEDIA_TYPE_VIDEO,
684  .id = AV_CODEC_ID_UTVIDEO,
685  .priv_data_size = sizeof(UtvideoContext),
686  .priv_class = &utvideo_class,
687  .init = utvideo_encode_init,
688  .encode2 = utvideo_encode_frame,
689  .close = utvideo_encode_close,
690  .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
691  .pix_fmts = (const enum AVPixelFormat[]) {
692  AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV422P,
693  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
694  },
695 };