FFmpeg
utvideoenc.c
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/imgutils.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/opt.h"
30 
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "put_bits.h"
36 #include "huffyuvencdsp.h"
37 #include "mathops.h"
38 #include "utvideo.h"
39 #include "huffman.h"
40 
41 /* Compare huffentry symbols */
42 static int huff_cmp_sym(const void *a, const void *b)
43 {
44  const HuffEntry *aa = a, *bb = b;
45  return aa->sym - bb->sym;
46 }
47 
48 static av_cold int utvideo_encode_close(AVCodecContext *avctx)
49 {
50  UtvideoContext *c = avctx->priv_data;
51  int i;
52 
53  av_freep(&c->slice_bits);
54  for (i = 0; i < 4; i++)
55  av_freep(&c->slice_buffer[i]);
56 
57  return 0;
58 }
59 
60 static av_cold int utvideo_encode_init(AVCodecContext *avctx)
61 {
62  UtvideoContext *c = avctx->priv_data;
63  int i, subsampled_height;
64  uint32_t original_format;
65 
66  c->avctx = avctx;
67  c->frame_info_size = 4;
68  c->slice_stride = FFALIGN(avctx->width, 32);
69 
70  switch (avctx->pix_fmt) {
71  case AV_PIX_FMT_RGB24:
72  c->planes = 3;
73  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
74  original_format = UTVIDEO_RGB;
75  break;
76  case AV_PIX_FMT_RGBA:
77  c->planes = 4;
78  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
79  original_format = UTVIDEO_RGBA;
80  break;
81  case AV_PIX_FMT_YUV420P:
82  if (avctx->width & 1 || avctx->height & 1) {
83  av_log(avctx, AV_LOG_ERROR,
84  "4:2:0 video requires even width and height.\n");
85  return AVERROR_INVALIDDATA;
86  }
87  c->planes = 3;
88  if (avctx->colorspace == AVCOL_SPC_BT709)
89  avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
90  else
91  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
92  original_format = UTVIDEO_420;
93  break;
94  case AV_PIX_FMT_YUV422P:
95  if (avctx->width & 1) {
96  av_log(avctx, AV_LOG_ERROR,
97  "4:2:2 video requires even width.\n");
98  return AVERROR_INVALIDDATA;
99  }
100  c->planes = 3;
101  if (avctx->colorspace == AVCOL_SPC_BT709)
102  avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
103  else
104  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
105  original_format = UTVIDEO_422;
106  break;
107  case AV_PIX_FMT_YUV444P:
108  c->planes = 3;
109  if (avctx->colorspace == AVCOL_SPC_BT709)
110  avctx->codec_tag = MKTAG('U', 'L', 'H', '4');
111  else
112  avctx->codec_tag = MKTAG('U', 'L', 'Y', '4');
113  original_format = UTVIDEO_444;
114  break;
115  default:
116  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
117  avctx->pix_fmt);
118  return AVERROR_INVALIDDATA;
119  }
120 
121  ff_bswapdsp_init(&c->bdsp);
122  ff_huffyuvencdsp_init(&c->hdsp);
123 
124 #if FF_API_PRIVATE_OPT
125 FF_DISABLE_DEPRECATION_WARNINGS
126  /* Check the prediction method, and error out if unsupported */
127  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
128  av_log(avctx, AV_LOG_WARNING,
129  "Prediction method %d is not supported in Ut Video.\n",
130  avctx->prediction_method);
131  return AVERROR_OPTION_NOT_FOUND;
132  }
133 
134  if (avctx->prediction_method == FF_PRED_PLANE) {
135  av_log(avctx, AV_LOG_ERROR,
136  "Plane prediction is not supported in Ut Video.\n");
137  return AVERROR_OPTION_NOT_FOUND;
138  }
139 
140  /* Convert from libavcodec prediction type to Ut Video's */
141  if (avctx->prediction_method)
142  c->frame_pred = ff_ut_pred_order[avctx->prediction_method + 1];
143 FF_ENABLE_DEPRECATION_WARNINGS
144 #endif
145 
146  if (c->frame_pred == PRED_GRADIENT) {
147  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
148  return AVERROR_OPTION_NOT_FOUND;
149  }
150 
151  /*
152  * Check the asked slice count for obviously invalid
153  * values (> 256 or negative).
154  */
155  if (avctx->slices > 256 || avctx->slices < 0) {
156  av_log(avctx, AV_LOG_ERROR,
157  "Slice count %d is not supported in Ut Video (theoretical range is 0-256).\n",
158  avctx->slices);
159  return AVERROR(EINVAL);
160  }
161 
162  /* Check that the slice count is not larger than the subsampled height */
163  subsampled_height = avctx->height >> av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h;
164  if (avctx->slices > subsampled_height) {
165  av_log(avctx, AV_LOG_ERROR,
166  "Slice count %d is larger than the subsampling-applied height %d.\n",
167  avctx->slices, subsampled_height);
168  return AVERROR(EINVAL);
169  }
170 
171  /* extradata size is 4 * 32 bits */
172  avctx->extradata_size = 16;
173 
174  avctx->extradata = av_mallocz(avctx->extradata_size +
175  AV_INPUT_BUFFER_PADDING_SIZE);
176 
177  if (!avctx->extradata) {
178  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
179  utvideo_encode_close(avctx);
180  return AVERROR(ENOMEM);
181  }
182 
183  for (i = 0; i < c->planes; i++) {
184  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
185  AV_INPUT_BUFFER_PADDING_SIZE);
186  if (!c->slice_buffer[i]) {
187  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
188  utvideo_encode_close(avctx);
189  return AVERROR(ENOMEM);
190  }
191  }
192 
193  /*
194  * Set the version of the encoder.
195  * Last byte is "implementation ID", which is
196  * obtained from the creator of the format.
197  * Libavcodec has been assigned the ID 0xF0.
198  */
199  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
200 
201  /*
202  * Set the "original format"
203  * Not used for anything during decoding.
204  */
205  AV_WL32(avctx->extradata + 4, original_format);
206 
207  /* Write 4 as the 'frame info size' */
208  AV_WL32(avctx->extradata + 8, c->frame_info_size);
209 
210  /*
211  * Set how many slices are going to be used.
212  * By default uses multiple slices depending on the subsampled height.
213  * This enables multithreading in the official decoder.
214  */
215  if (!avctx->slices) {
216  c->slices = subsampled_height / 120;
217 
218  if (!c->slices)
219  c->slices = 1;
220  else if (c->slices > 256)
221  c->slices = 256;
222  } else {
223  c->slices = avctx->slices;
224  }
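/*
 * Worked example of the default slice count: 1920x1080 4:2:0 input
 * gives subsampled_height = 1080 >> 1 = 540, so 540 / 120 = 4 slices;
 * a 320x240 4:2:0 clip gives 240 >> 1 = 120 and thus a single slice.
 */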
225 
226  /* Set compression mode */
227  c->compression = COMP_HUFF;
228 
229  /*
230  * Set the encoding flags:
231  * - Slice count minus 1
232  * - Interlaced encoding mode flag, set to zero for now.
233  * - Compression mode (none/huff)
234  * And write the flags.
235  */
236  c->flags = (c->slices - 1) << 24;
237  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
238  c->flags |= c->compression;
239 
240  AV_WL32(avctx->extradata + 12, c->flags);
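/*
 * For reference, the 16 bytes of extradata written above lay out as:
 *   bytes  0- 3: encoder version tag carrying the implementation ID 0xF0
 *   bytes  4- 7: original pixel format (little-endian)
 *   bytes  8-11: frame info size, always 4 here (little-endian)
 *   bytes 12-15: flags word (little-endian): slice count minus one in
 *                the top 8 bits, bit 11 for interlacing (left unset),
 *                compression mode in the low bits
 * For example, 4 slices with Huffman compression and progressive input
 * give flags = (4 - 1) << 24 | COMP_HUFF.
 */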
241 
242  return 0;
243 }
244 
245 static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
246  int step, int stride, int width, int height)
247 {
248  int i, j;
249  int k = 2 * dst_stride;
250  unsigned int g;
251 
252  for (j = 0; j < height; j++) {
253  if (step == 3) {
254  for (i = 0; i < width * step; i += step) {
255  g = src[i + 1];
256  dst[0][k] = g;
257  g += 0x80;
258  dst[1][k] = src[i + 2] - g;
259  dst[2][k] = src[i + 0] - g;
260  k++;
261  }
262  } else {
263  for (i = 0; i < width * step; i += step) {
264  g = src[i + 1];
265  dst[0][k] = g;
266  g += 0x80;
267  dst[1][k] = src[i + 2] - g;
268  dst[2][k] = src[i + 0] - g;
269  dst[3][k] = src[i + 3];
270  k++;
271  }
272  }
273  k += dst_stride - width;
274  src += stride;
275  }
276 }
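/*
 * Worked example of the transform above for one RGB24 pixel with
 * R = 100, G = 50, B = 200, using 8-bit wraparound arithmetic:
 *   plane 0 (G)           = 50
 *   plane 1 (B - G - 128) = (200 - 178) & 0xFF =  22
 *   plane 2 (R - G - 128) = (100 - 178) & 0xFF = 178
 * For RGBA input the alpha channel is copied through unchanged.
 */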
277 
278 /* Write data to a plane with left prediction */
279 static void left_predict(uint8_t *src, uint8_t *dst, int stride,
280  int width, int height)
281 {
282  int i, j;
283  uint8_t prev;
284 
285  prev = 0x80; /* Set the initial value */
286  for (j = 0; j < height; j++) {
287  for (i = 0; i < width; i++) {
288  *dst++ = src[i] - prev;
289  prev = src[i];
290  }
291  src += stride;
292  }
293 }
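/*
 * Worked example: the row {0x80, 0x82, 0x81} with the initial
 * predictor 0x80 is written out as {0x00, 0x02, 0xFF}. Note that the
 * predictor is not reset between rows, so the last sample of one row
 * predicts the first sample of the next.
 */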
294 
295 /* Write data to a plane with median prediction */
296 static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride,
297  int width, int height)
298 {
299  int i, j;
300  int A, B;
301  uint8_t prev;
302 
303  /* First line uses left neighbour prediction */
304  prev = 0x80; /* Set the initial value */
305  for (i = 0; i < width; i++) {
306  *dst++ = src[i] - prev;
307  prev = src[i];
308  }
309 
310  if (height == 1)
311  return;
312 
313  src += stride;
314 
315  /*
316  * Second line uses top prediction for the first sample,
317  * and median for the rest.
318  */
319  A = B = 0;
320 
321  /* Rest of the coded part uses median prediction */
322  for (j = 1; j < height; j++) {
323  c->hdsp.sub_hfyu_median_pred(dst, src - stride, src, width, &A, &B);
324  dst += width;
325  src += stride;
326  }
327 }
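/*
 * The sub_hfyu_median_pred() call above subtracts HuffYUV's variant of
 * median prediction: for each sample the prediction is the median of
 * the left neighbour, the top neighbour and (left + top - topleft),
 * and only the residual (current - prediction) is stored.
 */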
328 
329 /* Count the usage of values in a plane */
330 static void count_usage(uint8_t *src, int width,
331  int height, uint64_t *counts)
332 {
333  int i, j;
334 
335  for (j = 0; j < height; j++) {
336  for (i = 0; i < width; i++) {
337  counts[src[i]]++;
338  }
339  src += width;
340  }
341 }
342 
343 /* Calculate the actual huffman codes from the code lengths */
344 static void calculate_codes(HuffEntry *he)
345 {
346  int last, i;
347  uint32_t code;
348 
349  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
350 
351  last = 255;
352  while (he[last].len == 255 && last)
353  last--;
354 
355  code = 1;
356  for (i = last; i >= 0; i--) {
357  he[i].code = code >> (32 - he[i].len);
358  code += 0x80000000u >> (he[i].len - 1);
359  }
360 
361  qsort(he, 256, sizeof(*he), huff_cmp_sym);
362 }
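/*
 * Illustration of the canonical code assignment above: with code
 * lengths a = 1, b = 2, c = 3 and d = 3 bits (all other symbols unused
 * and therefore assigned length 255), walking from the longest to the
 * shortest code yields d = 000, c = 001, b = 01 and a = 1, which is a
 * valid prefix-free code.
 */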
363 
364 /* Write huffman bit codes to a memory block */
365 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
366  int width, int height, HuffEntry *he)
367 {
368  PutBitContext pb;
369  int i, j;
370  int count;
371 
372  init_put_bits(&pb, dst, dst_size);
373 
374  /* Write the codes */
375  for (j = 0; j < height; j++) {
376  for (i = 0; i < width; i++)
377  put_bits(&pb, he[src[i]].len, he[src[i]].code);
378 
379  src += width;
380  }
381 
382  /* Pad output to a 32-bit boundary */
383  count = put_bits_count(&pb) & 0x1F;
384 
385  if (count)
386  put_bits(&pb, 32 - count, 0);
387 
388  /* Get the amount of bits written */
389  count = put_bits_count(&pb);
390 
391  /* Flush the rest with zeroes */
392  flush_put_bits(&pb);
393 
394  return count;
395 }
396 
397 static int encode_plane(AVCodecContext *avctx, uint8_t *src,
398  uint8_t *dst, int stride, int plane_no,
399  int width, int height, PutByteContext *pb)
400 {
401  UtvideoContext *c = avctx->priv_data;
402  uint8_t lengths[256];
403  uint64_t counts[256] = { 0 };
404 
405  HuffEntry he[256];
406 
407  uint32_t offset = 0, slice_len = 0;
408  const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
409  int i, sstart, send = 0;
410  int symbol;
411  int ret;
412 
413  /* Do prediction / make planes */
414  switch (c->frame_pred) {
415  case PRED_NONE:
416  for (i = 0; i < c->slices; i++) {
417  sstart = send;
418  send = height * (i + 1) / c->slices & cmask;
419  av_image_copy_plane(dst + sstart * width, width,
420  src + sstart * stride, stride,
421  width, send - sstart);
422  }
423  break;
424  case PRED_LEFT:
425  for (i = 0; i < c->slices; i++) {
426  sstart = send;
427  send = height * (i + 1) / c->slices & cmask;
428  left_predict(src + sstart * stride, dst + sstart * width,
429  stride, width, send - sstart);
430  }
431  break;
432  case PRED_MEDIAN:
433  for (i = 0; i < c->slices; i++) {
434  sstart = send;
435  send = height * (i + 1) / c->slices & cmask;
436  median_predict(c, src + sstart * stride, dst + sstart * width,
437  stride, width, send - sstart);
438  }
439  break;
440  default:
441  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
442  c->frame_pred);
443  return AVERROR_OPTION_NOT_FOUND;
444  }
445 
446  /* Count the usage of values */
447  count_usage(dst, width, height, counts);
448 
449  /* Check for a special case where only one symbol was used */
450  for (symbol = 0; symbol < 256; symbol++) {
451  /* If non-zero count is found, see if it matches width * height */
452  if (counts[symbol]) {
453  /* Special case if only one symbol was used */
454  if (counts[symbol] == width * (int64_t)height) {
455  /*
456  * Write a zero for the single symbol
457  * used in the plane, else 0xFF.
458  */
459  for (i = 0; i < 256; i++) {
460  if (i == symbol)
461  bytestream2_put_byte(pb, 0);
462  else
463  bytestream2_put_byte(pb, 0xFF);
464  }
465 
466  /* Write zeroes for lengths */
467  for (i = 0; i < c->slices; i++)
468  bytestream2_put_le32(pb, 0);
469 
470  /* And that's all for that plane folks */
471  return 0;
472  }
473  break;
474  }
475  }
476 
477  /* Calculate huffman lengths */
478  if ((ret = ff_huff_gen_len_table(lengths, counts, 256, 1)) < 0)
479  return ret;
480 
481  /*
482  * Write the plane's header into the output packet:
483  * - huffman code lengths (256 bytes)
484  * - slice end offsets (gotten from the slice lengths)
485  */
486  for (i = 0; i < 256; i++) {
487  bytestream2_put_byte(pb, lengths[i]);
488 
489  he[i].len = lengths[i];
490  he[i].sym = i;
491  }
492 
493  /* Calculate the huffman codes themselves */
494  calculate_codes(he);
495 
496  send = 0;
497  for (i = 0; i < c->slices; i++) {
498  sstart = send;
499  send = height * (i + 1) / c->slices & cmask;
500 
501  /*
502  * Write the huffman codes to a buffer,
503  * get the offset in bits and convert to bytes.
504  */
505  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
506  width * height + 4, width,
507  send - sstart, he) >> 3;
508 
509  slice_len = offset - slice_len;
510 
511  /* Byteswap the written huffman codes */
512  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
513  (uint32_t *) c->slice_bits,
514  slice_len >> 2);
515 
516  /* Write the offset to the stream */
517  bytestream2_put_le32(pb, offset);
518 
519  /* Seek to the data part of the packet */
520  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
521  offset - slice_len, SEEK_CUR);
522 
523  /* Write the slices' data into the output packet */
524  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
525 
526  /* Seek back to the slice offsets */
527  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
528  SEEK_CUR);
529 
530  slice_len = offset;
531  }
532 
533  /* And at the end seek to the end of written slice(s) */
534  bytestream2_seek_p(pb, offset, SEEK_CUR);
535 
536  return 0;
537 }
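/*
 * To summarize, every plane encoded above contributes the following to
 * the packet, in this order:
 *   - 256 bytes of Huffman code lengths (in the single-symbol shortcut
 *     above: 0 for the used symbol, 0xFF elsewhere)
 *   - one little-endian 32-bit end offset per slice, cumulative from
 *     the start of the slice data (all zero in the shortcut case)
 *   - the slice bitstreams themselves, each padded to a 32-bit boundary
 *     and byteswapped in 32-bit units by c->bdsp.bswap_buf()
 */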
538 
539 static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
540  const AVFrame *pic, int *got_packet)
541 {
542  UtvideoContext *c = avctx->priv_data;
543  PutByteContext pb;
544 
545  uint32_t frame_info;
546 
547  uint8_t *dst;
548 
549  int width = avctx->width, height = avctx->height;
550  int i, ret = 0;
551 
552  /* Allocate a new packet if needed, and set it to the pointer dst */
553  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
554  c->planes + 4, 0);
555 
556  if (ret < 0)
557  return ret;
558 
559  dst = pkt->data;
560 
561  bytestream2_init_writer(&pb, dst, pkt->size);
562 
563  av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);
564 
565  if (!c->slice_bits) {
566  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
567  return AVERROR(ENOMEM);
568  }
569 
570  /* In case of RGB, mangle the planes to Ut Video's format */
571  if (avctx->pix_fmt == AV_PIX_FMT_RGBA || avctx->pix_fmt == AV_PIX_FMT_RGB24)
572  mangle_rgb_planes(c->slice_buffer, c->slice_stride, pic->data[0],
573  c->planes, pic->linesize[0], width, height);
574 
575  /* Deal with the planes */
576  switch (avctx->pix_fmt) {
577  case AV_PIX_FMT_RGB24:
578  case AV_PIX_FMT_RGBA:
579  for (i = 0; i < c->planes; i++) {
580  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
581  c->slice_buffer[i], c->slice_stride, i,
582  width, height, &pb);
583 
584  if (ret) {
585  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
586  return ret;
587  }
588  }
589  break;
590  case AV_PIX_FMT_YUV444P:
591  for (i = 0; i < c->planes; i++) {
592  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
593  pic->linesize[i], i, width, height, &pb);
594 
595  if (ret) {
596  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
597  return ret;
598  }
599  }
600  break;
601  case AV_PIX_FMT_YUV422P:
602  for (i = 0; i < c->planes; i++) {
603  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
604  pic->linesize[i], i, width >> !!i, height, &pb);
605 
606  if (ret) {
607  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
608  return ret;
609  }
610  }
611  break;
612  case AV_PIX_FMT_YUV420P:
613  for (i = 0; i < c->planes; i++) {
614  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
615  pic->linesize[i], i, width >> !!i, height >> !!i,
616  &pb);
617 
618  if (ret) {
619  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
620  return ret;
621  }
622  }
623  break;
624  default:
625  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
626  avctx->pix_fmt);
627  return AVERROR_INVALIDDATA;
628  }
629 
630  /*
631  * Write frame information (LE 32-bit unsigned)
632  * into the output packet.
633  * Contains the prediction method.
634  */
635  frame_info = c->frame_pred << 8;
636  bytestream2_put_le32(&pb, frame_info);
637 
638  /*
639  * At least currently Ut Video is IDR only.
640  * Set flags accordingly.
641  */
642 #if FF_API_CODED_FRAME
643 FF_DISABLE_DEPRECATION_WARNINGS
644  avctx->coded_frame->key_frame = 1;
645  avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
646 FF_ENABLE_DEPRECATION_WARNINGS
647 #endif
648 
649  pkt->size = bytestream2_tell_p(&pb);
650  pkt->flags |= AV_PKT_FLAG_KEY;
651 
652  /* Packet should be done */
653  *got_packet = 1;
654 
655  return 0;
656 }
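/*
 * The finished packet therefore holds the per-plane payloads produced
 * by encode_plane() in plane order, followed by one little-endian
 * 32-bit frame_info word whose second byte carries the prediction
 * method used for the frame.
 */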
657 
658 #define OFFSET(x) offsetof(UtvideoContext, x)
659 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
660 static const AVOption options[] = {
661 { "pred", "Prediction method", OFFSET(frame_pred), AV_OPT_TYPE_INT, { .i64 = PRED_LEFT }, PRED_NONE, PRED_MEDIAN, VE, "pred" },
662  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_NONE }, INT_MIN, INT_MAX, VE, "pred" },
663  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_LEFT }, INT_MIN, INT_MAX, VE, "pred" },
664  { "gradient", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_GRADIENT }, INT_MIN, INT_MAX, VE, "pred" },
665  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PRED_MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },
666 
667  { NULL},
668 };
669 
670 static const AVClass utvideo_class = {
671  .class_name = "utvideo",
672  .item_name = av_default_item_name,
673  .option = options,
674  .version = LIBAVUTIL_VERSION_INT,
675 };
676 
677 AVCodec ff_utvideo_encoder = {
678  .name = "utvideo",
679  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
680  .type = AVMEDIA_TYPE_VIDEO,
681  .id = AV_CODEC_ID_UTVIDEO,
682  .priv_data_size = sizeof(UtvideoContext),
683  .priv_class = &utvideo_class,
684  .init = utvideo_encode_init,
685  .encode2 = utvideo_encode_frame,
686  .close = utvideo_encode_close,
687  .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
688  .pix_fmts = (const enum AVPixelFormat[]) {
689  AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV422P,
690  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
691  },
692 };
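/*
 * Usage sketch: the encoder is registered as "utvideo" and exposes a
 * single private option, "pred", so a typical invocation is
 *
 *     ffmpeg -i input.mov -c:v utvideo -pred median output.avi
 *
 * API users can select the predictor with, for example,
 *
 *     av_opt_set(avctx->priv_data, "pred", "median", 0);
 *
 * before avcodec_open2(). Gradient prediction is listed in the option
 * table but rejected at init time, as seen in utvideo_encode_init().
 */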