videotoolboxenc.c
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "atsc_a53.h"
35 #include "encode.h"
36 #include "h264.h"
37 #include "h264_sei.h"
38 #include <dlfcn.h>
39 
40 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
41 enum { kCMVideoCodecType_HEVC = 'hvc1' };
42 #endif
43 
44 #if !HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA
45 enum { kCMVideoCodecType_HEVCWithAlpha = 'muxa' };
46 #endif
47 
48 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
49 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange  = 'xf20' };
50 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
51 #endif
52 
53 #ifndef TARGET_CPU_ARM64
54 # define TARGET_CPU_ARM64 0
55 #endif
56 
57 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
58  size_t parameterSetIndex,
59  const uint8_t **parameterSetPointerOut,
60  size_t *parameterSetSizeOut,
61  size_t *parameterSetCountOut,
62  int *NALUnitHeaderLengthOut);
63 
64 //These symbols may not be present
65 static struct{
66  CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
67  CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
68  CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
69 
70  CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
71  CFStringRef kVTH264EntropyMode_CAVLC;
72  CFStringRef kVTH264EntropyMode_CABAC;
73 
74  CFStringRef kVTProfileLevel_H264_Baseline_4_0;
75  CFStringRef kVTProfileLevel_H264_Baseline_4_2;
76  CFStringRef kVTProfileLevel_H264_Baseline_5_0;
77  CFStringRef kVTProfileLevel_H264_Baseline_5_1;
78  CFStringRef kVTProfileLevel_H264_Baseline_5_2;
79  CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
80  CFStringRef kVTProfileLevel_H264_Main_4_2;
81  CFStringRef kVTProfileLevel_H264_Main_5_1;
82  CFStringRef kVTProfileLevel_H264_Main_5_2;
83  CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
84  CFStringRef kVTProfileLevel_H264_High_3_0;
85  CFStringRef kVTProfileLevel_H264_High_3_1;
86  CFStringRef kVTProfileLevel_H264_High_3_2;
87  CFStringRef kVTProfileLevel_H264_High_4_0;
88  CFStringRef kVTProfileLevel_H264_High_4_1;
89  CFStringRef kVTProfileLevel_H264_High_4_2;
90  CFStringRef kVTProfileLevel_H264_High_5_1;
91  CFStringRef kVTProfileLevel_H264_High_5_2;
92  CFStringRef kVTProfileLevel_H264_High_AutoLevel;
93  CFStringRef kVTProfileLevel_H264_Extended_5_0;
94  CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
95 
96  CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
97  CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
98 
99  CFStringRef kVTCompressionPropertyKey_RealTime;
100  CFStringRef kVTCompressionPropertyKey_TargetQualityForAlpha;
101 
102  CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
103  CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
104 
105  getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
106 } compat_keys;
107 
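/*
 * GET_SYM resolves an optional VideoToolbox/CoreVideo constant at run time
 * with dlsym(); when the linked frameworks do not export the symbol, a CFSTR
 * built from the given literal is stored in compat_keys as a fallback.
 */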
108 #define GET_SYM(symbol, defaultVal) \
109 do{ \
110  CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
111  if(!handle) \
112  compat_keys.symbol = CFSTR(defaultVal); \
113  else \
114  compat_keys.symbol = *handle; \
115 }while(0)
116 
117 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
118 
119 static void loadVTEncSymbols(){
120  compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
121  (getParameterSetAtIndex)dlsym(
122  RTLD_DEFAULT,
123  "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
124  );
125 
126  GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
127  GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
128  GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
129 
130  GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
131  GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
132  GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
133 
134  GET_SYM(kVTProfileLevel_H264_Baseline_4_0, "H264_Baseline_4_0");
135  GET_SYM(kVTProfileLevel_H264_Baseline_4_2, "H264_Baseline_4_2");
136  GET_SYM(kVTProfileLevel_H264_Baseline_5_0, "H264_Baseline_5_0");
137  GET_SYM(kVTProfileLevel_H264_Baseline_5_1, "H264_Baseline_5_1");
138  GET_SYM(kVTProfileLevel_H264_Baseline_5_2, "H264_Baseline_5_2");
139  GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
140  GET_SYM(kVTProfileLevel_H264_Main_4_2, "H264_Main_4_2");
141  GET_SYM(kVTProfileLevel_H264_Main_5_1, "H264_Main_5_1");
142  GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2");
143  GET_SYM(kVTProfileLevel_H264_Main_AutoLevel, "H264_Main_AutoLevel");
144  GET_SYM(kVTProfileLevel_H264_High_3_0, "H264_High_3_0");
145  GET_SYM(kVTProfileLevel_H264_High_3_1, "H264_High_3_1");
146  GET_SYM(kVTProfileLevel_H264_High_3_2, "H264_High_3_2");
147  GET_SYM(kVTProfileLevel_H264_High_4_0, "H264_High_4_0");
148  GET_SYM(kVTProfileLevel_H264_High_4_1, "H264_High_4_1");
149  GET_SYM(kVTProfileLevel_H264_High_4_2, "H264_High_4_2");
150  GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
151  GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
152  GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
153  GET_SYM(kVTProfileLevel_H264_Extended_5_0, "H264_Extended_5_0");
154  GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
155 
156  GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
157  GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
158 
159  GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
160  GET_SYM(kVTCompressionPropertyKey_TargetQualityForAlpha,
161  "TargetQualityForAlpha");
162 
163  GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
164  "EnableHardwareAcceleratedVideoEncoder");
165  GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
166  "RequireHardwareAcceleratedVideoEncoder");
167 }
168 
169 typedef enum VT_H264Profile {
170  H264_PROF_AUTO,
171  H264_PROF_BASELINE,
172  H264_PROF_MAIN,
173  H264_PROF_HIGH,
174  H264_PROF_EXTENDED,
175  H264_PROF_COUNT
176 } VT_H264Profile;
177 
178 typedef enum VTH264Entropy{
179  VT_ENTROPY_NOT_SET,
180  VT_CAVLC,
181  VT_CABAC
182 } VTH264Entropy;
183 
184 typedef enum VT_HEVCProfile {
185  HEVC_PROF_AUTO,
186  HEVC_PROF_MAIN,
187  HEVC_PROF_MAIN10,
188  HEVC_PROF_COUNT
189 } VT_HEVCProfile;
190 
191 static const uint8_t start_code[] = { 0, 0, 0, 1 };
192 
193 typedef struct ExtraSEI {
194  void *data;
195  size_t size;
196 } ExtraSEI;
197 
198 typedef struct BufNode {
199  CMSampleBufferRef cm_buffer;
200  ExtraSEI* sei;
201  struct BufNode* next;
202  int error;
203 } BufNode;
204 
205 typedef struct VTEncContext {
206  AVClass *class;
207  enum AVCodecID codec_id;
208  VTCompressionSessionRef session;
209  CFStringRef ycbcr_matrix;
210  CFStringRef color_primaries;
211  CFStringRef transfer_function;
212  getParameterSetAtIndex get_param_set_func;
213 
214  pthread_mutex_t lock;
215  pthread_cond_t cv_sample_sent;
216 
217  int async_error;
218 
219  BufNode *q_head;
220  BufNode *q_tail;
221 
222  int64_t frame_ct_out;
223  int64_t frame_ct_in;
224 
225  int64_t first_pts;
226  int64_t dts_delta;
227 
228  int64_t profile;
229  int64_t level;
230  int64_t entropy;
231  int64_t realtime;
232  int64_t frames_before;
233  int64_t frames_after;
234 
235  int64_t allow_sw;
236  int64_t require_sw;
237  double alpha_quality;
238 
239  bool flushing;
240  int has_b_frames;
241  bool warned_color_range;
242 
243  /* can't be bool type since AVOption will access it as int */
244  int a53_cc;
245 } VTEncContext;
246 
247 static int vtenc_populate_extradata(AVCodecContext *avctx,
248  CMVideoCodecType codec_type,
249  CFStringRef profile_level,
250  CFNumberRef gamma_level,
251  CFDictionaryRef enc_info,
252  CFDictionaryRef pixel_buffer_info);
253 
254 /**
255  * NULL-safe release of *refPtr, and sets value to NULL.
256  */
257 static void vt_release_num(CFNumberRef* refPtr){
258  if (!*refPtr) {
259  return;
260  }
261 
262  CFRelease(*refPtr);
263  *refPtr = NULL;
264 }
265 
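/*
 * Stores an asynchronous error code (or 0) and drains the output queue,
 * releasing every sample buffer that was already queued. Also used by
 * clear_frame_queue().
 */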
266 static void set_async_error(VTEncContext *vtctx, int err)
267 {
268  BufNode *info;
269 
270  pthread_mutex_lock(&vtctx->lock);
271 
272  vtctx->async_error = err;
273 
274  info = vtctx->q_head;
275  vtctx->q_head = vtctx->q_tail = NULL;
276 
277  while (info) {
278  BufNode *next = info->next;
279  CFRelease(info->cm_buffer);
280  av_free(info);
281  info = next;
282  }
283 
284  pthread_mutex_unlock(&vtctx->lock);
285 }
286 
287 static void clear_frame_queue(VTEncContext *vtctx)
288 {
289  set_async_error(vtctx, 0);
290 }
291 
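/*
 * Pops the next encoded sample buffer (and its optional SEI payload) from
 * the output queue. When wait is true this blocks on cv_sample_sent until
 * output is available, flushing finishes, or an asynchronous error has been
 * recorded. *buf is set to NULL when there is nothing to return.
 */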
292 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
293 {
294  BufNode *info;
295 
296  pthread_mutex_lock(&vtctx->lock);
297 
298  if (vtctx->async_error) {
299  pthread_mutex_unlock(&vtctx->lock);
300  return vtctx->async_error;
301  }
302 
303  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
304  *buf = NULL;
305 
306  pthread_mutex_unlock(&vtctx->lock);
307  return 0;
308  }
309 
310  while (!vtctx->q_head && !vtctx->async_error && wait && !vtctx->flushing) {
311  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
312  }
313 
314  if (!vtctx->q_head) {
315  pthread_mutex_unlock(&vtctx->lock);
316  *buf = NULL;
317  return 0;
318  }
319 
320  info = vtctx->q_head;
321  vtctx->q_head = vtctx->q_head->next;
322  if (!vtctx->q_head) {
323  vtctx->q_tail = NULL;
324  }
325 
326  vtctx->frame_ct_out++;
327  pthread_mutex_unlock(&vtctx->lock);
328 
329  *buf = info->cm_buffer;
330  if (sei && *buf) {
331  *sei = info->sei;
332  } else if (info->sei) {
333  if (info->sei->data) av_free(info->sei->data);
334  av_free(info->sei);
335  }
336  av_free(info);
337 
338 
339  return 0;
340 }
341 
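/*
 * Appends an encoded sample buffer (retained) and its optional SEI data to
 * the output queue and signals cv_sample_sent to wake a waiting consumer.
 */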
342 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
343 {
344  BufNode *info = av_malloc(sizeof(BufNode));
345  if (!info) {
346  set_async_error(vtctx, AVERROR(ENOMEM));
347  return;
348  }
349 
350  CFRetain(buffer);
351  info->cm_buffer = buffer;
352  info->sei = sei;
353  info->next = NULL;
354 
355  pthread_mutex_lock(&vtctx->lock);
356 
357  if (!vtctx->q_head) {
358  vtctx->q_head = info;
359  } else {
360  vtctx->q_tail->next = info;
361  }
362 
363  vtctx->q_tail = info;
364 
365  pthread_cond_signal(&vtctx->cv_sample_sent);
366  pthread_mutex_unlock(&vtctx->lock);
367 }
368 
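/*
 * Counts the length-prefixed NAL units contained in a sample buffer by
 * walking its length codes. length_code_size must be at most 4 bytes.
 */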
369 static int count_nalus(size_t length_code_size,
370  CMSampleBufferRef sample_buffer,
371  int *count)
372 {
373  size_t offset = 0;
374  int status;
375  int nalu_ct = 0;
376  uint8_t size_buf[4];
377  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
378  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
379 
380  if (length_code_size > 4)
381  return AVERROR_INVALIDDATA;
382 
383  while (offset < src_size) {
384  size_t curr_src_len;
385  size_t box_len = 0;
386  size_t i;
387 
388  status = CMBlockBufferCopyDataBytes(block,
389  offset,
390  length_code_size,
391  size_buf);
392 
393  for (i = 0; i < length_code_size; i++) {
394  box_len <<= 8;
395  box_len |= size_buf[i];
396  }
397 
398  curr_src_len = box_len + length_code_size;
399  offset += curr_src_len;
400 
401  nalu_ct++;
402  }
403 
404  *count = nalu_ct;
405  return 0;
406 }
407 
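/*
 * Maps an AVCodecID to the CoreMedia codec type. BGRA input with a positive
 * alpha quality selects the HEVC-with-alpha encoder.
 */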
408 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id,
409  enum AVPixelFormat fmt,
410  double alpha_quality)
411 {
412  switch (id) {
413  case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
414  case AV_CODEC_ID_HEVC:
415  if (fmt == AV_PIX_FMT_BGRA && alpha_quality > 0.0) {
416  return kCMVideoCodecType_HEVCWithAlpha;
417  }
418  return kCMVideoCodecType_HEVC;
419  default: return 0;
420  }
421 }
422 
423 /**
424  * Get the parameter sets from a CMSampleBufferRef.
425  * @param dst If *dst isn't NULL, the parameters are copied into existing
426  * memory. *dst_size must be set accordingly when *dst != NULL.
427  * If *dst is NULL, it will be allocated.
428  * In all cases, *dst_size is set to the number of bytes used starting
429  * at *dst.
430  */
431 static int get_params_size(
432  AVCodecContext *avctx,
433  CMVideoFormatDescriptionRef vid_fmt,
434  size_t *size)
435 {
436  VTEncContext *vtctx = avctx->priv_data;
437  size_t total_size = 0;
438  size_t ps_count;
439  int is_count_bad = 0;
440  size_t i;
441  int status;
442  status = vtctx->get_param_set_func(vid_fmt,
443  0,
444  NULL,
445  NULL,
446  &ps_count,
447  NULL);
448  if (status) {
449  is_count_bad = 1;
450  ps_count = 0;
451  status = 0;
452  }
453 
454  for (i = 0; i < ps_count || is_count_bad; i++) {
455  const uint8_t *ps;
456  size_t ps_size;
457  status = vtctx->get_param_set_func(vid_fmt,
458  i,
459  &ps,
460  &ps_size,
461  NULL,
462  NULL);
463  if (status) {
464  /*
465  * When ps_count is invalid, status != 0 ends the loop normally
466  * unless we didn't get any parameter sets.
467  */
468  if (i > 0 && is_count_bad) status = 0;
469 
470  break;
471  }
472 
473  total_size += ps_size + sizeof(start_code);
474  }
475 
476  if (status) {
477  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
478  return AVERROR_EXTERNAL;
479  }
480 
481  *size = total_size;
482  return 0;
483 }
484 
485 static int copy_param_sets(
486  AVCodecContext *avctx,
487  CMVideoFormatDescriptionRef vid_fmt,
488  uint8_t *dst,
489  size_t dst_size)
490 {
491  VTEncContext *vtctx = avctx->priv_data;
492  size_t ps_count;
493  int is_count_bad = 0;
494  int status;
495  size_t offset = 0;
496  size_t i;
497 
498  status = vtctx->get_param_set_func(vid_fmt,
499  0,
500  NULL,
501  NULL,
502  &ps_count,
503  NULL);
504  if (status) {
505  is_count_bad = 1;
506  ps_count = 0;
507  status = 0;
508  }
509 
510 
511  for (i = 0; i < ps_count || is_count_bad; i++) {
512  const uint8_t *ps;
513  size_t ps_size;
514  size_t next_offset;
515 
516  status = vtctx->get_param_set_func(vid_fmt,
517  i,
518  &ps,
519  &ps_size,
520  NULL,
521  NULL);
522  if (status) {
523  if (i > 0 && is_count_bad) status = 0;
524 
525  break;
526  }
527 
528  next_offset = offset + sizeof(start_code) + ps_size;
529  if (dst_size < next_offset) {
530  av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
531  return AVERROR_BUFFER_TOO_SMALL;
532  }
533 
534  memcpy(dst + offset, start_code, sizeof(start_code));
535  offset += sizeof(start_code);
536 
537  memcpy(dst + offset, ps, ps_size);
538  offset = next_offset;
539  }
540 
541  if (status) {
542  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
543  return AVERROR_EXTERNAL;
544  }
545 
546  return 0;
547 }
548 
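/*
 * Builds avctx->extradata from the parameter sets attached to an encoded
 * sample buffer, each prefixed with an Annex B start code.
 */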
549 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
550 {
551  CMVideoFormatDescriptionRef vid_fmt;
552  size_t total_size;
553  int status;
554 
555  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
556  if (!vid_fmt) {
557  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
558  return AVERROR_EXTERNAL;
559  }
560 
561  status = get_params_size(avctx, vid_fmt, &total_size);
562  if (status) {
563  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
564  return status;
565  }
566 
567  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
568  if (!avctx->extradata) {
569  return AVERROR(ENOMEM);
570  }
571  avctx->extradata_size = total_size;
572 
573  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
574 
575  if (status) {
576  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
577  return status;
578  }
579 
580  return 0;
581 }
582 
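/*
 * VTCompressionSession output callback. Runs on a VideoToolbox thread:
 * records asynchronous errors, fills extradata from the first packet when
 * global headers are requested, and queues the encoded sample buffer.
 */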
583 static void vtenc_output_callback(
584  void *ctx,
585  void *sourceFrameCtx,
586  OSStatus status,
587  VTEncodeInfoFlags flags,
588  CMSampleBufferRef sample_buffer)
589 {
590  AVCodecContext *avctx = ctx;
591  VTEncContext *vtctx = avctx->priv_data;
592  ExtraSEI *sei = sourceFrameCtx;
593 
594  if (vtctx->async_error) {
595  return;
596  }
597 
598  if (status) {
599  av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
600  set_async_error(vtctx, AVERROR_EXTERNAL);
601  return;
602  }
603 
604  if (!sample_buffer) {
605  return;
606  }
607 
608  if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
609  int set_status = set_extradata(avctx, sample_buffer);
610  if (set_status) {
611  set_async_error(vtctx, set_status);
612  return;
613  }
614  }
615 
616  vtenc_q_push(vtctx, sample_buffer, sei);
617 }
618 
619 static int get_length_code_size(
620  AVCodecContext *avctx,
621  CMSampleBufferRef sample_buffer,
622  size_t *size)
623 {
624  VTEncContext *vtctx = avctx->priv_data;
625  CMVideoFormatDescriptionRef vid_fmt;
626  int isize;
627  int status;
628 
629  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
630  if (!vid_fmt) {
631  av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
632  return AVERROR_EXTERNAL;
633  }
634 
635  status = vtctx->get_param_set_func(vid_fmt,
636  0,
637  NULL,
638  NULL,
639  NULL,
640  &isize);
641  if (status) {
642  av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
643  return AVERROR_EXTERNAL;
644  }
645 
646  *size = isize;
647  return 0;
648 }
649 
650 /*
651  * Returns true on success.
652  *
653  * If profile_level_val is NULL and this method returns true, don't specify the
654  * profile/level to the encoder.
655  */
656 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
657  CFStringRef *profile_level_val)
658 {
659  VTEncContext *vtctx = avctx->priv_data;
660  int64_t profile = vtctx->profile;
661 
662  if (profile == H264_PROF_AUTO && vtctx->level) {
663  //Need to pick a profile if level is not auto-selected.
664  profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
665  }
666 
667  *profile_level_val = NULL;
668 
669  switch (profile) {
670  case H264_PROF_AUTO:
671  return true;
672 
673  case H264_PROF_BASELINE:
674  switch (vtctx->level) {
675  case 0: *profile_level_val =
676  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
677  case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
678  case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
679  case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
680  case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
681  case 40: *profile_level_val =
682  compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
683  case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
684  case 42: *profile_level_val =
685  compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
686  case 50: *profile_level_val =
687  compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
688  case 51: *profile_level_val =
689  compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
690  case 52: *profile_level_val =
691  compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
692  }
693  break;
694 
695  case H264_PROF_MAIN:
696  switch (vtctx->level) {
697  case 0: *profile_level_val =
698  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
699  case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
700  case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
701  case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
702  case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
703  case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
704  case 42: *profile_level_val =
705  compat_keys.kVTProfileLevel_H264_Main_4_2; break;
706  case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
707  case 51: *profile_level_val =
708  compat_keys.kVTProfileLevel_H264_Main_5_1; break;
709  case 52: *profile_level_val =
710  compat_keys.kVTProfileLevel_H264_Main_5_2; break;
711  }
712  break;
713 
714  case H264_PROF_HIGH:
715  switch (vtctx->level) {
716  case 0: *profile_level_val =
717  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
718  case 30: *profile_level_val =
719  compat_keys.kVTProfileLevel_H264_High_3_0; break;
720  case 31: *profile_level_val =
721  compat_keys.kVTProfileLevel_H264_High_3_1; break;
722  case 32: *profile_level_val =
723  compat_keys.kVTProfileLevel_H264_High_3_2; break;
724  case 40: *profile_level_val =
725  compat_keys.kVTProfileLevel_H264_High_4_0; break;
726  case 41: *profile_level_val =
727  compat_keys.kVTProfileLevel_H264_High_4_1; break;
728  case 42: *profile_level_val =
729  compat_keys.kVTProfileLevel_H264_High_4_2; break;
730  case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
731  case 51: *profile_level_val =
732  compat_keys.kVTProfileLevel_H264_High_5_1; break;
733  case 52: *profile_level_val =
734  compat_keys.kVTProfileLevel_H264_High_5_2; break;
735  }
736  break;
737  case H264_PROF_EXTENDED:
738  switch (vtctx->level) {
739  case 0: *profile_level_val =
740  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
741  case 50: *profile_level_val =
742  compat_keys.kVTProfileLevel_H264_Extended_5_0; break;
743  }
744  break;
745  }
746 
747  if (!*profile_level_val) {
748  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
749  return false;
750  }
751 
752  return true;
753 }
754 
755 /*
756  * Returns true on success.
757  *
758  * If profile_level_val is NULL and this method returns true, don't specify the
759  * profile/level to the encoder.
760  */
761 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
762  CFStringRef *profile_level_val)
763 {
764  VTEncContext *vtctx = avctx->priv_data;
765  int64_t profile = vtctx->profile;
766 
767  *profile_level_val = NULL;
768 
769  switch (profile) {
770  case HEVC_PROF_AUTO:
771  return true;
772  case HEVC_PROF_MAIN:
773  *profile_level_val =
774  compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
775  break;
776  case HEVC_PROF_MAIN10:
777  *profile_level_val =
778  compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
779  break;
780  }
781 
782  if (!*profile_level_val) {
783  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
784  return false;
785  }
786 
787  return true;
788 }
789 
790 static int get_cv_pixel_format(AVCodecContext* avctx,
791  enum AVPixelFormat fmt,
792  enum AVColorRange range,
793  int* av_pixel_format,
794  int* range_guessed)
795 {
796  if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
797  range != AVCOL_RANGE_JPEG;
798 
799  //MPEG range is used when no range is set
800  if (fmt == AV_PIX_FMT_NV12) {
801  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
802  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
803  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
804  } else if (fmt == AV_PIX_FMT_YUV420P) {
805  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
806  kCVPixelFormatType_420YpCbCr8PlanarFullRange :
807  kCVPixelFormatType_420YpCbCr8Planar;
808  } else if (fmt == AV_PIX_FMT_BGRA) {
809  *av_pixel_format = kCVPixelFormatType_32BGRA;
810  } else if (fmt == AV_PIX_FMT_P010LE) {
811  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
812  kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
813  kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
814  } else {
815  return AVERROR(EINVAL);
816  }
817 
818  return 0;
819 }
820 
821 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
822  VTEncContext *vtctx = avctx->priv_data;
823 
824  if (vtctx->color_primaries) {
825  CFDictionarySetValue(dict,
826  kCVImageBufferColorPrimariesKey,
827  vtctx->color_primaries);
828  }
829 
830  if (vtctx->transfer_function) {
831  CFDictionarySetValue(dict,
832  kCVImageBufferTransferFunctionKey,
833  vtctx->transfer_function);
834  }
835 
836  if (vtctx->ycbcr_matrix) {
837  CFDictionarySetValue(dict,
838  kCVImageBufferYCbCrMatrixKey,
839  vtctx->ycbcr_matrix);
840  }
841 }
842 
843 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
844  CFMutableDictionaryRef* dict)
845 {
846  CFNumberRef cv_color_format_num = NULL;
847  CFNumberRef width_num = NULL;
848  CFNumberRef height_num = NULL;
849  CFMutableDictionaryRef pixel_buffer_info = NULL;
850  int cv_color_format;
851  int status = get_cv_pixel_format(avctx,
852  avctx->pix_fmt,
853  avctx->color_range,
854  &cv_color_format,
855  NULL);
856  if (status) return status;
857 
858  pixel_buffer_info = CFDictionaryCreateMutable(
859  kCFAllocatorDefault,
860  20,
861  &kCFCopyStringDictionaryKeyCallBacks,
862  &kCFTypeDictionaryValueCallBacks);
863 
864  if (!pixel_buffer_info) goto pbinfo_nomem;
865 
866  cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
867  kCFNumberSInt32Type,
868  &cv_color_format);
869  if (!cv_color_format_num) goto pbinfo_nomem;
870 
871  CFDictionarySetValue(pixel_buffer_info,
872  kCVPixelBufferPixelFormatTypeKey,
873  cv_color_format_num);
874  vt_release_num(&cv_color_format_num);
875 
876  width_num = CFNumberCreate(kCFAllocatorDefault,
877  kCFNumberSInt32Type,
878  &avctx->width);
879  if (!width_num) return AVERROR(ENOMEM);
880 
881  CFDictionarySetValue(pixel_buffer_info,
882  kCVPixelBufferWidthKey,
883  width_num);
884  vt_release_num(&width_num);
885 
886  height_num = CFNumberCreate(kCFAllocatorDefault,
887  kCFNumberSInt32Type,
888  &avctx->height);
889  if (!height_num) goto pbinfo_nomem;
890 
891  CFDictionarySetValue(pixel_buffer_info,
892  kCVPixelBufferHeightKey,
893  height_num);
894  vt_release_num(&height_num);
895 
896  add_color_attr(avctx, pixel_buffer_info);
897 
898  *dict = pixel_buffer_info;
899  return 0;
900 
901 pbinfo_nomem:
902  vt_release_num(&cv_color_format_num);
903  vt_release_num(&width_num);
904  vt_release_num(&height_num);
905  if (pixel_buffer_info) CFRelease(pixel_buffer_info);
906 
907  return AVERROR(ENOMEM);
908 }
909 
910 static int get_cv_color_primaries(AVCodecContext *avctx,
911  CFStringRef *primaries)
912 {
913  enum AVColorPrimaries pri = avctx->color_primaries;
914  switch (pri) {
915  case AVCOL_PRI_UNSPECIFIED:
916  *primaries = NULL;
917  break;
918 
919  case AVCOL_PRI_BT470BG:
920  *primaries = kCVImageBufferColorPrimaries_EBU_3213;
921  break;
922 
923  case AVCOL_PRI_SMPTE170M:
924  *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
925  break;
926 
927  case AVCOL_PRI_BT709:
928  *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
929  break;
930 
931  case AVCOL_PRI_BT2020:
932  *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
933  break;
934 
935  default:
936  av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
937  *primaries = NULL;
938  return -1;
939  }
940 
941  return 0;
942 }
943 
944 static int get_cv_transfer_function(AVCodecContext *avctx,
945  CFStringRef *transfer_fnc,
946  CFNumberRef *gamma_level)
947 {
948  enum AVColorTransferCharacteristic trc = avctx->color_trc;
949  Float32 gamma;
950  *gamma_level = NULL;
951 
952  switch (trc) {
953  case AVCOL_TRC_UNSPECIFIED:
954  *transfer_fnc = NULL;
955  break;
956 
957  case AVCOL_TRC_BT709:
958  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
959  break;
960 
961  case AVCOL_TRC_SMPTE240M:
962  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
963  break;
964 
965 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
966  case AVCOL_TRC_SMPTE2084:
967  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
968  break;
969 #endif
970 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
971  case AVCOL_TRC_LINEAR:
972  *transfer_fnc = kCVImageBufferTransferFunction_Linear;
973  break;
974 #endif
975 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
976  case AVCOL_TRC_ARIB_STD_B67:
977  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
978  break;
979 #endif
980 
981  case AVCOL_TRC_GAMMA22:
982  gamma = 2.2;
983  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
984  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
985  break;
986 
987  case AVCOL_TRC_GAMMA28:
988  gamma = 2.8;
989  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
990  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
991  break;
992 
993  case AVCOL_TRC_BT2020_10:
994  case AVCOL_TRC_BT2020_12:
995  *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
996  break;
997 
998  default:
999  *transfer_fnc = NULL;
1000  av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
1001  return -1;
1002  }
1003 
1004  return 0;
1005 }
1006 
1007 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
1008  switch(avctx->colorspace) {
1009  case AVCOL_SPC_BT709:
1010  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
1011  break;
1012 
1013  case AVCOL_SPC_UNSPECIFIED:
1014  *matrix = NULL;
1015  break;
1016 
1017  case AVCOL_SPC_BT470BG:
1018  case AVCOL_SPC_SMPTE170M:
1019  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
1020  break;
1021 
1022  case AVCOL_SPC_SMPTE240M:
1023  *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
1024  break;
1025 
1026  case AVCOL_SPC_BT2020_NCL:
1027  *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
1028  break;
1029 
1030  default:
1031  av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
1032  return -1;
1033  }
1034 
1035  return 0;
1036 }
1037 
1038 // constant quality only on Macs with Apple Silicon
1039 static bool vtenc_qscale_enabled(void)
1040 {
1041  return !TARGET_OS_IPHONE && TARGET_CPU_ARM64;
1042 }
1043 
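/*
 * Creates the VTCompressionSession and applies rate control (average bitrate
 * or constant quality), data-rate limits, alpha quality, profile/level,
 * keyframe interval, colour properties, entropy mode and realtime flags
 * taken from the codec context and the private options.
 */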
1044 static int vtenc_create_encoder(AVCodecContext *avctx,
1045  CMVideoCodecType codec_type,
1046  CFStringRef profile_level,
1047  CFNumberRef gamma_level,
1048  CFDictionaryRef enc_info,
1049  CFDictionaryRef pixel_buffer_info,
1050  VTCompressionSessionRef *session)
1051 {
1052  VTEncContext *vtctx = avctx->priv_data;
1053  SInt32 bit_rate = avctx->bit_rate;
1054  SInt32 max_rate = avctx->rc_max_rate;
1055  Float32 quality = avctx->global_quality / FF_QP2LAMBDA;
1056  CFNumberRef bit_rate_num;
1057  CFNumberRef quality_num;
1058  CFNumberRef bytes_per_second;
1059  CFNumberRef one_second;
1060  CFArrayRef data_rate_limits;
1061  int64_t bytes_per_second_value = 0;
1062  int64_t one_second_value = 0;
1063  void *nums[2];
1064 
1065  int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1066  avctx->width,
1067  avctx->height,
1068  codec_type,
1069  enc_info,
1070  pixel_buffer_info,
1071  kCFAllocatorDefault,
1072  vtenc_output_callback,
1073  avctx,
1074  session);
1075 
1076  if (status || !vtctx->session) {
1077  av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1078 
1079 #if !TARGET_OS_IPHONE
1080  if (!vtctx->allow_sw) {
1081  av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1082  }
1083 #endif
1084 
1085  return AVERROR_EXTERNAL;
1086  }
1087 
1088  if (avctx->flags & AV_CODEC_FLAG_QSCALE && !vtenc_qscale_enabled()) {
1089  av_log(avctx, AV_LOG_ERROR, "Error: -q:v qscale not available for encoder. Use -b:v bitrate instead.\n");
1090  return AVERROR_EXTERNAL;
1091  }
1092 
1093  if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
1094  quality = quality >= 100 ? 1.0 : quality / 100;
1095  quality_num = CFNumberCreate(kCFAllocatorDefault,
1096  kCFNumberFloat32Type,
1097  &quality);
1098  if (!quality_num) return AVERROR(ENOMEM);
1099 
1100  status = VTSessionSetProperty(vtctx->session,
1101  kVTCompressionPropertyKey_Quality,
1102  quality_num);
1103  CFRelease(quality_num);
1104  } else {
1105  bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1106  kCFNumberSInt32Type,
1107  &bit_rate);
1108  if (!bit_rate_num) return AVERROR(ENOMEM);
1109 
1110  status = VTSessionSetProperty(vtctx->session,
1111  kVTCompressionPropertyKey_AverageBitRate,
1112  bit_rate_num);
1113  CFRelease(bit_rate_num);
1114  }
1115 
1116  if (status) {
1117  av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1118  return AVERROR_EXTERNAL;
1119  }
1120 
1121  if ((vtctx->codec_id == AV_CODEC_ID_H264 || vtctx->codec_id == AV_CODEC_ID_HEVC)
1122  && max_rate > 0) {
1123  bytes_per_second_value = max_rate >> 3;
1124  bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1125  kCFNumberSInt64Type,
1126  &bytes_per_second_value);
1127  if (!bytes_per_second) {
1128  return AVERROR(ENOMEM);
1129  }
1130  one_second_value = 1;
1131  one_second = CFNumberCreate(kCFAllocatorDefault,
1132  kCFNumberSInt64Type,
1133  &one_second_value);
1134  if (!one_second) {
1135  CFRelease(bytes_per_second);
1136  return AVERROR(ENOMEM);
1137  }
1138  nums[0] = (void *)bytes_per_second;
1139  nums[1] = (void *)one_second;
1140  data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1141  (const void **)nums,
1142  2,
1143  &kCFTypeArrayCallBacks);
1144 
1145  if (!data_rate_limits) {
1146  CFRelease(bytes_per_second);
1147  CFRelease(one_second);
1148  return AVERROR(ENOMEM);
1149  }
1150  status = VTSessionSetProperty(vtctx->session,
1151  kVTCompressionPropertyKey_DataRateLimits,
1152  data_rate_limits);
1153 
1154  CFRelease(bytes_per_second);
1155  CFRelease(one_second);
1156  CFRelease(data_rate_limits);
1157 
1158  if (status) {
1159  av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1160  // kVTCompressionPropertyKey_DataRateLimits is supported for HEVC on
1161  // recent OS releases but not on older ones, and there is no documentation
1162  // of when support was added, so ignore the error if it fails for HEVC.
1163  if (vtctx->codec_id != AV_CODEC_ID_HEVC)
1164  return AVERROR_EXTERNAL;
1165  }
1166  }
1167 
1168  if (vtctx->codec_id == AV_CODEC_ID_HEVC) {
1169  if (avctx->pix_fmt == AV_PIX_FMT_BGRA && vtctx->alpha_quality > 0.0) {
1170  CFNumberRef alpha_quality_num = CFNumberCreate(kCFAllocatorDefault,
1171  kCFNumberDoubleType,
1172  &vtctx->alpha_quality);
1173  if (!alpha_quality_num) return AVERROR(ENOMEM);
1174 
1175  status = VTSessionSetProperty(vtctx->session,
1176  compat_keys.kVTCompressionPropertyKey_TargetQualityForAlpha,
1177  alpha_quality_num);
1178  CFRelease(alpha_quality_num);
1179  }
1180  }
1181 
1182  if (profile_level) {
1183  status = VTSessionSetProperty(vtctx->session,
1184  kVTCompressionPropertyKey_ProfileLevel,
1185  profile_level);
1186  if (status) {
1187  av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
1188  }
1189  }
1190 
1191  if (avctx->gop_size > 0) {
1192  CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1193  kCFNumberIntType,
1194  &avctx->gop_size);
1195  if (!interval) {
1196  return AVERROR(ENOMEM);
1197  }
1198 
1199  status = VTSessionSetProperty(vtctx->session,
1200  kVTCompressionPropertyKey_MaxKeyFrameInterval,
1201  interval);
1202  CFRelease(interval);
1203 
1204  if (status) {
1205  av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1206  return AVERROR_EXTERNAL;
1207  }
1208  }
1209 
1210  if (vtctx->frames_before) {
1211  status = VTSessionSetProperty(vtctx->session,
1212  kVTCompressionPropertyKey_MoreFramesBeforeStart,
1213  kCFBooleanTrue);
1214 
1215  if (status == kVTPropertyNotSupportedErr) {
1216  av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1217  } else if (status) {
1218  av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1219  }
1220  }
1221 
1222  if (vtctx->frames_after) {
1223  status = VTSessionSetProperty(vtctx->session,
1224  kVTCompressionPropertyKey_MoreFramesAfterEnd,
1225  kCFBooleanTrue);
1226 
1227  if (status == kVTPropertyNotSupportedErr) {
1228  av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1229  } else if (status) {
1230  av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1231  }
1232  }
1233 
1234  if (avctx->sample_aspect_ratio.num != 0) {
1235  CFNumberRef num;
1236  CFNumberRef den;
1237  CFMutableDictionaryRef par;
1238  AVRational *avpar = &avctx->sample_aspect_ratio;
1239 
1240  av_reduce(&avpar->num, &avpar->den,
1241  avpar->num, avpar->den,
1242  0xFFFFFFFF);
1243 
1244  num = CFNumberCreate(kCFAllocatorDefault,
1245  kCFNumberIntType,
1246  &avpar->num);
1247 
1248  den = CFNumberCreate(kCFAllocatorDefault,
1249  kCFNumberIntType,
1250  &avpar->den);
1251 
1252 
1253 
1254  par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1255  2,
1256  &kCFCopyStringDictionaryKeyCallBacks,
1257  &kCFTypeDictionaryValueCallBacks);
1258 
1259  if (!par || !num || !den) {
1260  if (par) CFRelease(par);
1261  if (num) CFRelease(num);
1262  if (den) CFRelease(den);
1263 
1264  return AVERROR(ENOMEM);
1265  }
1266 
1267  CFDictionarySetValue(
1268  par,
1269  kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1270  num);
1271 
1272  CFDictionarySetValue(
1273  par,
1274  kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1275  den);
1276 
1277  status = VTSessionSetProperty(vtctx->session,
1278  kVTCompressionPropertyKey_PixelAspectRatio,
1279  par);
1280 
1281  CFRelease(par);
1282  CFRelease(num);
1283  CFRelease(den);
1284 
1285  if (status) {
1286  av_log(avctx,
1287  AV_LOG_ERROR,
1288  "Error setting pixel aspect ratio to %d:%d: %d.\n",
1289  avctx->sample_aspect_ratio.num,
1290  avctx->sample_aspect_ratio.den,
1291  status);
1292 
1293  return AVERROR_EXTERNAL;
1294  }
1295  }
1296 
1297 
1298  if (vtctx->transfer_function) {
1299  status = VTSessionSetProperty(vtctx->session,
1300  kVTCompressionPropertyKey_TransferFunction,
1301  vtctx->transfer_function);
1302 
1303  if (status) {
1304  av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1305  }
1306  }
1307 
1308 
1309  if (vtctx->ycbcr_matrix) {
1310  status = VTSessionSetProperty(vtctx->session,
1311  kVTCompressionPropertyKey_YCbCrMatrix,
1312  vtctx->ycbcr_matrix);
1313 
1314  if (status) {
1315  av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1316  }
1317  }
1318 
1319 
1320  if (vtctx->color_primaries) {
1321  status = VTSessionSetProperty(vtctx->session,
1322  kVTCompressionPropertyKey_ColorPrimaries,
1323  vtctx->color_primaries);
1324 
1325  if (status) {
1326  av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1327  }
1328  }
1329 
1330  if (gamma_level) {
1331  status = VTSessionSetProperty(vtctx->session,
1332  kCVImageBufferGammaLevelKey,
1333  gamma_level);
1334 
1335  if (status) {
1336  av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1337  }
1338  }
1339 
1340  if (!vtctx->has_b_frames) {
1341  status = VTSessionSetProperty(vtctx->session,
1342  kVTCompressionPropertyKey_AllowFrameReordering,
1343  kCFBooleanFalse);
1344 
1345  if (status) {
1346  av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1347  return AVERROR_EXTERNAL;
1348  }
1349  }
1350 
1351  if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1352  CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1353  compat_keys.kVTH264EntropyMode_CABAC:
1354  compat_keys.kVTH264EntropyMode_CAVLC;
1355 
1356  status = VTSessionSetProperty(vtctx->session,
1357  compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1358  entropy);
1359 
1360  if (status) {
1361  av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1362  }
1363  }
1364 
1365  if (vtctx->realtime) {
1366  status = VTSessionSetProperty(vtctx->session,
1367  compat_keys.kVTCompressionPropertyKey_RealTime,
1368  kCFBooleanTrue);
1369 
1370  if (status) {
1371  av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1372  }
1373  }
1374 
1375  status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1376  if (status) {
1377  av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1378  return AVERROR_EXTERNAL;
1379  }
1380 
1381  return 0;
1382 }
1383 
1384 static int vtenc_configure_encoder(AVCodecContext *avctx)
1385 {
1386  CFMutableDictionaryRef enc_info;
1387  CFMutableDictionaryRef pixel_buffer_info;
1388  CMVideoCodecType codec_type;
1389  VTEncContext *vtctx = avctx->priv_data;
1390  CFStringRef profile_level;
1391  CFNumberRef gamma_level = NULL;
1392  int status;
1393 
1394  codec_type = get_cm_codec_type(avctx->codec_id, avctx->pix_fmt, vtctx->alpha_quality);
1395  if (!codec_type) {
1396  av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1397  return AVERROR(EINVAL);
1398  }
1399 
1400  vtctx->codec_id = avctx->codec_id;
1401  avctx->max_b_frames = 16;
1402 
1403  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1404  vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1405 
1406  vtctx->has_b_frames = avctx->max_b_frames > 0;
1407  if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1408  av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1409  vtctx->has_b_frames = 0;
1410  }
1411 
1412  if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1413  av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1414  vtctx->entropy = VT_ENTROPY_NOT_SET;
1415  }
1416 
1417  if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1418  } else {
1419  vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1420  if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1421  if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1422  // HEVC has B-pyramid
1423  vtctx->has_b_frames = avctx->max_b_frames > 0 ? 2 : 0;
1424  }
1425 
1426  enc_info = CFDictionaryCreateMutable(
1427  kCFAllocatorDefault,
1428  20,
1429  &kCFCopyStringDictionaryKeyCallBacks,
1430  &kCFTypeDictionaryValueCallBacks
1431  );
1432 
1433  if (!enc_info) return AVERROR(ENOMEM);
1434 
1435 #if !TARGET_OS_IPHONE
1436  if(vtctx->require_sw) {
1437  CFDictionarySetValue(enc_info,
1438  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1439  kCFBooleanFalse);
1440  } else if (!vtctx->allow_sw) {
1441  CFDictionarySetValue(enc_info,
1442  compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1443  kCFBooleanTrue);
1444  } else {
1445  CFDictionarySetValue(enc_info,
1446  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1447  kCFBooleanTrue);
1448  }
1449 #endif
1450 
1451  if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1452  status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1453  if (status)
1454  goto init_cleanup;
1455  } else {
1456  pixel_buffer_info = NULL;
1457  }
1458 
1459  vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1460 
1461  get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1462  get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1463  get_cv_color_primaries(avctx, &vtctx->color_primaries);
1464 
1465 
1466  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1467  status = vtenc_populate_extradata(avctx,
1468  codec_type,
1469  profile_level,
1470  gamma_level,
1471  enc_info,
1472  pixel_buffer_info);
1473  if (status)
1474  goto init_cleanup;
1475  }
1476 
1477  status = vtenc_create_encoder(avctx,
1478  codec_type,
1479  profile_level,
1480  gamma_level,
1481  enc_info,
1482  pixel_buffer_info,
1483  &vtctx->session);
1484 
1485 init_cleanup:
1486  if (gamma_level)
1487  CFRelease(gamma_level);
1488 
1489  if (pixel_buffer_info)
1490  CFRelease(pixel_buffer_info);
1491 
1492  CFRelease(enc_info);
1493 
1494  return status;
1495 }
1496 
1497 static av_cold int vtenc_init(AVCodecContext *avctx)
1498 {
1499  VTEncContext *vtctx = avctx->priv_data;
1500  CFBooleanRef has_b_frames_cfbool;
1501  int status;
1502 
1503  pthread_once(&once_ctrl, loadVTEncSymbols);
1504 
1505  pthread_mutex_init(&vtctx->lock, NULL);
1506  pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1507 
1508  vtctx->session = NULL;
1509  status = vtenc_configure_encoder(avctx);
1510  if (status) return status;
1511 
1512  status = VTSessionCopyProperty(vtctx->session,
1513  kVTCompressionPropertyKey_AllowFrameReordering,
1514  kCFAllocatorDefault,
1515  &has_b_frames_cfbool);
1516 
1517  if (!status && has_b_frames_cfbool) {
1518  //Some devices don't output B-frames for main profile, even if requested.
1519  // HEVC has b-pyramid
1520  vtctx->has_b_frames = (CFBooleanGetValue(has_b_frames_cfbool) && avctx->codec_id == AV_CODEC_ID_HEVC) ? 2 : 1;
1521  CFRelease(has_b_frames_cfbool);
1522  }
1523  avctx->has_b_frames = vtctx->has_b_frames;
1524 
1525  return 0;
1526 }
1527 
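/*
 * Checks the sample attachments to determine whether the encoded frame is a
 * sync (key) frame; frames without attachments are treated as key frames.
 */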
1528 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1529 {
1530  CFArrayRef attachments;
1531  CFDictionaryRef attachment;
1532  CFBooleanRef not_sync;
1533  CFIndex len;
1534 
1535  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1536  len = !attachments ? 0 : CFArrayGetCount(attachments);
1537 
1538  if (!len) {
1539  *is_key_frame = true;
1540  return;
1541  }
1542 
1543  attachment = CFArrayGetValueAtIndex(attachments, 0);
1544 
1545  if (CFDictionaryGetValueIfPresent(attachment,
1546  kCMSampleAttachmentKey_NotSync,
1547  (const void **)&not_sync))
1548  {
1549  *is_key_frame = !CFBooleanGetValue(not_sync);
1550  } else {
1551  *is_key_frame = true;
1552  }
1553 }
1554 
1555 static int is_post_sei_nal_type(int nal_type){
1556  return nal_type != H264_NAL_SEI &&
1557  nal_type != H264_NAL_SPS &&
1558  nal_type != H264_NAL_PPS &&
1559  nal_type != H264_NAL_AUD;
1560 }
1561 
1562 /*
1563  * Finds the end of the SEI messages inside an SEI NAL unit, so that
1564  * additional messages can be appended at *sei_end; returns the number of
1565  * bytes parsed, 0 if the NAL unit is not an SEI, or a negative error code.
1566  */
1566 static int find_sei_end(AVCodecContext *avctx,
1567  uint8_t *nal_data,
1568  size_t nal_size,
1569  uint8_t **sei_end)
1570 {
1571  int nal_type;
1572  size_t sei_payload_size = 0;
1573  int sei_payload_type = 0;
1574  *sei_end = NULL;
1575  uint8_t *nal_start = nal_data;
1576 
1577  if (!nal_size)
1578  return 0;
1579 
1580  nal_type = *nal_data & 0x1F;
1581  if (nal_type != H264_NAL_SEI)
1582  return 0;
1583 
1584  nal_data++;
1585  nal_size--;
1586 
1587  if (nal_data[nal_size - 1] == 0x80)
1588  nal_size--;
1589 
1590  while (nal_size > 0 && *nal_data > 0) {
1591  do{
1592  sei_payload_type += *nal_data;
1593  nal_data++;
1594  nal_size--;
1595  } while (nal_size > 0 && *nal_data == 0xFF);
1596 
1597  if (!nal_size) {
1598  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1599  return AVERROR_INVALIDDATA;
1600  }
1601 
1602  do{
1603  sei_payload_size += *nal_data;
1604  nal_data++;
1605  nal_size--;
1606  } while (nal_size > 0 && *nal_data == 0xFF);
1607 
1608  if (nal_size < sei_payload_size) {
1609  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1610  return AVERROR_INVALIDDATA;
1611  }
1612 
1613  nal_data += sei_payload_size;
1614  nal_size -= sei_payload_size;
1615  }
1616 
1617  *sei_end = nal_data;
1618 
1619  return nal_data - nal_start + 1;
1620 }
1621 
1622 /**
1623  * Copies the data inserting emulation prevention bytes as needed.
1624  * Existing data in the destination can be taken into account by providing
1625  * dst with a dst_offset > 0.
1626  *
1627  * @return The number of bytes copied on success. On failure, the negative of
1628  * the number of bytes needed to copy src is returned.
1629  */
1630 static int copy_emulation_prev(const uint8_t *src,
1631  size_t src_size,
1632  uint8_t *dst,
1633  ssize_t dst_offset,
1634  size_t dst_size)
1635 {
1636  int zeros = 0;
1637  int wrote_bytes;
1638  uint8_t* dst_start;
1639  uint8_t* dst_end = dst + dst_size;
1640  const uint8_t* src_end = src + src_size;
1641  int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1642  int i;
1643  for (i = start_at; i < dst_offset && i < dst_size; i++) {
1644  if (!dst[i])
1645  zeros++;
1646  else
1647  zeros = 0;
1648  }
1649 
1650  dst += dst_offset;
1651  dst_start = dst;
1652  for (; src < src_end; src++, dst++) {
1653  if (zeros == 2) {
1654  int insert_ep3_byte = *src <= 3;
1655  if (insert_ep3_byte) {
1656  if (dst < dst_end)
1657  *dst = 3;
1658  dst++;
1659  }
1660 
1661  zeros = 0;
1662  }
1663 
1664  if (dst < dst_end)
1665  *dst = *src;
1666 
1667  if (!*src)
1668  zeros++;
1669  else
1670  zeros = 0;
1671  }
1672 
1673  wrote_bytes = dst - dst_start;
1674 
1675  if (dst > dst_end)
1676  return -wrote_bytes;
1677 
1678  return wrote_bytes;
1679 }
1680 
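/*
 * Writes one SEI message into dst: the payload type and payload size are
 * each emitted as a run of bytes (255 at a time, then the remainder),
 * followed by the payload with emulation prevention bytes inserted. Returns
 * the number of bytes written or AVERROR_BUFFER_TOO_SMALL.
 */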
1681 static int write_sei(const ExtraSEI *sei,
1682  int sei_type,
1683  uint8_t *dst,
1684  size_t dst_size)
1685 {
1686  uint8_t *sei_start = dst;
1687  size_t remaining_sei_size = sei->size;
1688  size_t remaining_dst_size = dst_size;
1689  int header_bytes;
1690  int bytes_written;
1691  ssize_t offset;
1692 
1693  if (!remaining_dst_size)
1694  return AVERROR_BUFFER_TOO_SMALL;
1695 
1696  while (sei_type && remaining_dst_size != 0) {
1697  int sei_byte = sei_type > 255 ? 255 : sei_type;
1698  *dst = sei_byte;
1699 
1700  sei_type -= sei_byte;
1701  dst++;
1702  remaining_dst_size--;
1703  }
1704 
1705  if (!dst_size)
1706  return AVERROR_BUFFER_TOO_SMALL;
1707 
1708  while (remaining_sei_size && remaining_dst_size != 0) {
1709  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1710  *dst = size_byte;
1711 
1712  remaining_sei_size -= size_byte;
1713  dst++;
1714  remaining_dst_size--;
1715  }
1716 
1717  if (remaining_dst_size < sei->size)
1718  return AVERROR_BUFFER_TOO_SMALL;
1719 
1720  header_bytes = dst - sei_start;
1721 
1722  offset = header_bytes;
1723  bytes_written = copy_emulation_prev(sei->data,
1724  sei->size,
1725  sei_start,
1726  offset,
1727  dst_size);
1728  if (bytes_written < 0)
1729  return AVERROR_BUFFER_TOO_SMALL;
1730 
1731  bytes_written += header_bytes;
1732  return bytes_written;
1733 }
1734 
1735 /**
1736  * Copies NAL units and replaces length codes with
1737  * H.264 Annex B start codes. On failure, the contents of
1738  * dst_data may have been modified.
1739  *
1740  * @param length_code_size Byte length of each length code
1741  * @param sample_buffer NAL units prefixed with length codes.
1742  * @param sei Optional A53 closed captions SEI data.
1743  * @param dst_data Must be zeroed before calling this function.
1744  * Contains the copied NAL units prefixed with
1745  * start codes when the function returns
1746  * successfully.
1747  * @param dst_size Length of dst_data
1748  * @return 0 on success
1749  * AVERROR_INVALIDDATA if length_code_size is invalid
1750  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1751  * or if a length_code in src_data specifies data beyond
1752  * the end of its buffer.
1753  */
1754 static int copy_replace_length_codes(
1755  AVCodecContext *avctx,
1756  size_t length_code_size,
1757  CMSampleBufferRef sample_buffer,
1758  ExtraSEI *sei,
1759  uint8_t *dst_data,
1760  size_t dst_size)
1761 {
1762  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1763  size_t remaining_src_size = src_size;
1764  size_t remaining_dst_size = dst_size;
1765  size_t src_offset = 0;
1766  int wrote_sei = 0;
1767  int status;
1768  uint8_t size_buf[4];
1769  uint8_t nal_type;
1770  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1771 
1772  if (length_code_size > 4) {
1773  return AVERROR_INVALIDDATA;
1774  }
1775 
1776  while (remaining_src_size > 0) {
1777  size_t curr_src_len;
1778  size_t curr_dst_len;
1779  size_t box_len = 0;
1780  size_t i;
1781 
1782  uint8_t *dst_box;
1783 
1784  status = CMBlockBufferCopyDataBytes(block,
1785  src_offset,
1786  length_code_size,
1787  size_buf);
1788  if (status) {
1789  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1790  return AVERROR_EXTERNAL;
1791  }
1792 
1793  status = CMBlockBufferCopyDataBytes(block,
1794  src_offset + length_code_size,
1795  1,
1796  &nal_type);
1797 
1798  if (status) {
1799  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1800  return AVERROR_EXTERNAL;
1801  }
1802 
1803  nal_type &= 0x1F;
1804 
1805  for (i = 0; i < length_code_size; i++) {
1806  box_len <<= 8;
1807  box_len |= size_buf[i];
1808  }
1809 
1810  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1811  //No SEI NAL unit - insert.
1812  int wrote_bytes;
1813 
1814  memcpy(dst_data, start_code, sizeof(start_code));
1815  dst_data += sizeof(start_code);
1816  remaining_dst_size -= sizeof(start_code);
1817 
1818  *dst_data = H264_NAL_SEI;
1819  dst_data++;
1820  remaining_dst_size--;
1821 
1822  wrote_bytes = write_sei(sei,
1824  dst_data,
1825  remaining_dst_size);
1826 
1827  if (wrote_bytes < 0)
1828  return wrote_bytes;
1829 
1830  remaining_dst_size -= wrote_bytes;
1831  dst_data += wrote_bytes;
1832 
1833  if (remaining_dst_size <= 0)
1834  return AVERROR_BUFFER_TOO_SMALL;
1835 
1836  *dst_data = 0x80;
1837 
1838  dst_data++;
1839  remaining_dst_size--;
1840 
1841  wrote_sei = 1;
1842  }
1843 
1844  curr_src_len = box_len + length_code_size;
1845  curr_dst_len = box_len + sizeof(start_code);
1846 
1847  if (remaining_src_size < curr_src_len) {
1848  return AVERROR_BUFFER_TOO_SMALL;
1849  }
1850 
1851  if (remaining_dst_size < curr_dst_len) {
1852  return AVERROR_BUFFER_TOO_SMALL;
1853  }
1854 
1855  dst_box = dst_data + sizeof(start_code);
1856 
1857  memcpy(dst_data, start_code, sizeof(start_code));
1858  status = CMBlockBufferCopyDataBytes(block,
1859  src_offset + length_code_size,
1860  box_len,
1861  dst_box);
1862 
1863  if (status) {
1864  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1865  return AVERROR_EXTERNAL;
1866  }
1867 
1868  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1869  //Found SEI NAL unit - append.
1870  int wrote_bytes;
1871  int old_sei_length;
1872  int extra_bytes;
1873  uint8_t *new_sei;
1874  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1875  if (old_sei_length < 0)
1876  return status;
1877 
1878  wrote_bytes = write_sei(sei,
1880  new_sei,
1881  remaining_dst_size - old_sei_length);
1882  if (wrote_bytes < 0)
1883  return wrote_bytes;
1884 
1885  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1886  return AVERROR_BUFFER_TOO_SMALL;
1887 
1888  new_sei[wrote_bytes++] = 0x80;
1889  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1890 
1891  dst_data += extra_bytes;
1892  remaining_dst_size -= extra_bytes;
1893 
1894  wrote_sei = 1;
1895  }
1896 
1897  src_offset += curr_src_len;
1898  dst_data += curr_dst_len;
1899 
1900  remaining_src_size -= curr_src_len;
1901  remaining_dst_size -= curr_dst_len;
1902  }
1903 
1904  return 0;
1905 }
1906 
1907 /**
1908  * Returns a sufficient number of bytes to contain the sei data.
1909  * It may be greater than the minimum required.
1910  */
1911 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1912  int copied_size;
1913  if (sei->size == 0)
1914  return 0;
1915 
1916  copied_size = -copy_emulation_prev(sei->data,
1917  sei->size,
1918  NULL,
1919  0,
1920  0);
1921 
1922  if ((sei->size % 255) == 0) //may result in an extra byte
1923  copied_size++;
1924 
1925  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1926 }
1927 
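/*
 * Converts an encoded CMSampleBuffer into an AVPacket: prepends parameter
 * sets on key frames when global headers are disabled, rewrites length codes
 * into Annex B start codes, inserts the A53 closed-caption SEI when present,
 * and derives pts/dts from the sample buffer timestamps.
 */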
1928 static int vtenc_cm_to_avpacket(
1929  AVCodecContext *avctx,
1930  CMSampleBufferRef sample_buffer,
1931  AVPacket *pkt,
1932  ExtraSEI *sei)
1933 {
1934  VTEncContext *vtctx = avctx->priv_data;
1935 
1936  int status;
1937  bool is_key_frame;
1938  bool add_header;
1939  size_t length_code_size;
1940  size_t header_size = 0;
1941  size_t in_buf_size;
1942  size_t out_buf_size;
1943  size_t sei_nalu_size = 0;
1944  int64_t dts_delta;
1945  int64_t time_base_num;
1946  int nalu_count;
1947  CMTime pts;
1948  CMTime dts;
1949  CMVideoFormatDescriptionRef vid_fmt;
1950 
1951 
1952  vtenc_get_frame_info(sample_buffer, &is_key_frame);
1953  status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1954  if (status) return status;
1955 
1956  add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1957 
1958  if (add_header) {
1959  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1960  if (!vid_fmt) {
1961  av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1962  return AVERROR_EXTERNAL;
1963  }
1964 
1965  int status = get_params_size(avctx, vid_fmt, &header_size);
1966  if (status) return status;
1967  }
1968 
1969  status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1970  if(status)
1971  return status;
1972 
1973  if (sei) {
1974  size_t msg_size = get_sei_msg_bytes(sei,
1976 
1977  sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1978  }
1979 
1980  in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1981  out_buf_size = header_size +
1982  in_buf_size +
1983  sei_nalu_size +
1984  nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1985 
1986  status = ff_get_encode_buffer(avctx, pkt, out_buf_size, 0);
1987  if (status < 0)
1988  return status;
1989 
1990  if (add_header) {
1991  status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1992  if(status) return status;
1993  }
1994 
1995  status = copy_replace_length_codes(
1996  avctx,
1997  length_code_size,
1998  sample_buffer,
1999  sei,
2000  pkt->data + header_size,
2001  pkt->size - header_size
2002  );
2003 
2004  if (status) {
2005  av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
2006  return status;
2007  }
2008 
2009  if (is_key_frame) {
2010  pkt->flags |= AV_PKT_FLAG_KEY;
2011  }
2012 
2013  pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
2014  dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
2015 
2016  if (CMTIME_IS_INVALID(dts)) {
2017  if (!vtctx->has_b_frames) {
2018  dts = pts;
2019  } else {
2020  av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
2021  return AVERROR_EXTERNAL;
2022  }
2023  }
2024 
2025  dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
2026  time_base_num = avctx->time_base.num;
2027  pkt->pts = pts.value / time_base_num;
2028  pkt->dts = dts.value / time_base_num - dts_delta;
2029 
2030  return 0;
2031 }
2032 
2033 /*
2034  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
2035  * containing all planes if so.
2036  */
2037 static int get_cv_pixel_info(
2038  AVCodecContext *avctx,
2039  const AVFrame *frame,
2040  int *color,
2041  int *plane_count,
2042  size_t *widths,
2043  size_t *heights,
2044  size_t *strides,
2045  size_t *contiguous_buf_size)
2046 {
2047  VTEncContext *vtctx = avctx->priv_data;
2048  int av_format = frame->format;
2049  int av_color_range = frame->color_range;
2050  int i;
2051  int range_guessed;
2052  int status;
2053 
2054  status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
2055  if (status) {
2056  av_log(avctx,
2057  AV_LOG_ERROR,
2058  "Could not get pixel format for color format '%s' range '%s'.\n",
2059  av_get_pix_fmt_name(av_format),
2060  av_color_range > AVCOL_RANGE_UNSPECIFIED &&
2061  av_color_range < AVCOL_RANGE_NB ?
2062  av_color_range_name(av_color_range) :
2063  "Unknown");
2064 
2065  return AVERROR(EINVAL);
2066  }
2067 
2068  if (range_guessed) {
2069  if (!vtctx->warned_color_range) {
2070  vtctx->warned_color_range = true;
2071  av_log(avctx,
2072  AV_LOG_WARNING,
2073  "Color range not set for %s. Using MPEG range.\n",
2074  av_get_pix_fmt_name(av_format));
2075  }
2076  }
2077 
2078  switch (av_format) {
2079  case AV_PIX_FMT_NV12:
2080  *plane_count = 2;
2081 
2082  widths [0] = avctx->width;
2083  heights[0] = avctx->height;
2084  strides[0] = frame ? frame->linesize[0] : avctx->width;
2085 
2086  widths [1] = (avctx->width + 1) / 2;
2087  heights[1] = (avctx->height + 1) / 2;
2088  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2089  break;
2090 
2091  case AV_PIX_FMT_YUV420P:
2092  *plane_count = 3;
2093 
2094  widths [0] = avctx->width;
2095  heights[0] = avctx->height;
2096  strides[0] = frame ? frame->linesize[0] : avctx->width;
2097 
2098  widths [1] = (avctx->width + 1) / 2;
2099  heights[1] = (avctx->height + 1) / 2;
2100  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2101 
2102  widths [2] = (avctx->width + 1) / 2;
2103  heights[2] = (avctx->height + 1) / 2;
2104  strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2105  break;
2106 
2107  case AV_PIX_FMT_BGRA:
2108  *plane_count = 1;
2109 
2110  widths [0] = avctx->width;
2111  heights[0] = avctx->height;
2112  strides[0] = frame ? frame->linesize[0] : avctx->width * 4;
2113  break;
2114 
2115  case AV_PIX_FMT_P010LE:
2116  *plane_count = 2;
2117  widths[0] = avctx->width;
2118  heights[0] = avctx->height;
2119  strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2120 
2121  widths[1] = (avctx->width + 1) / 2;
2122  heights[1] = (avctx->height + 1) / 2;
2123  strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2124  break;
2125 
2126  default:
2127  av_log(
2128  avctx,
2129  AV_LOG_ERROR,
2130  "Could not get frame format info for color %d range %d.\n",
2131  av_format,
2132  av_color_range);
2133 
2134  return AVERROR(EINVAL);
2135  }
2136 
2137  *contiguous_buf_size = 0;
2138  for (i = 0; i < *plane_count; i++) {
2139  if (i < *plane_count - 1 &&
2140  frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2141  *contiguous_buf_size = 0;
2142  break;
2143  }
2144 
2145  *contiguous_buf_size += strides[i] * heights[i];
2146  }
2147 
2148  return 0;
2149 }
2150 
2151 //Not used on OSX - frame is never copied.
2152 static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
2153  const AVFrame *frame,
2154  CVPixelBufferRef cv_img,
2155  const size_t *plane_strides,
2156  const size_t *plane_rows)
2157 {
2158  int i, j;
2159  size_t plane_count;
2160  int status;
2161  int rows;
2162  int src_stride;
2163  int dst_stride;
2164  uint8_t *src_addr;
2165  uint8_t *dst_addr;
2166  size_t copy_bytes;
2167 
2168  status = CVPixelBufferLockBaseAddress(cv_img, 0);
2169  if (status) {
2170  av_log(
2171  avctx,
2172  AV_LOG_ERROR,
2173  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2174  status
2175  );
2176  }
2177 
2178  if (CVPixelBufferIsPlanar(cv_img)) {
2179  plane_count = CVPixelBufferGetPlaneCount(cv_img);
2180  for (i = 0; frame->data[i]; i++) {
2181  if (i == plane_count) {
2182  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2183  av_log(avctx,
2184  AV_LOG_ERROR,
2185  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2186  );
2187 
2188  return AVERROR_EXTERNAL;
2189  }
2190 
2191  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2192  src_addr = (uint8_t*)frame->data[i];
2193  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2194  src_stride = plane_strides[i];
2195  rows = plane_rows[i];
2196 
2197  if (dst_stride == src_stride) {
2198  memcpy(dst_addr, src_addr, src_stride * rows);
2199  } else {
2200  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2201 
2202  for (j = 0; j < rows; j++) {
2203  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2204  }
2205  }
2206  }
2207  } else {
2208  if (frame->data[1]) {
2209  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2210  av_log(avctx,
2211  AV_LOG_ERROR,
2212  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2213  );
2214 
2215  return AVERROR_EXTERNAL;
2216  }
2217 
2218  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2219  src_addr = (uint8_t*)frame->data[0];
2220  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2221  src_stride = plane_strides[0];
2222  rows = plane_rows[0];
2223 
2224  if (dst_stride == src_stride) {
2225  memcpy(dst_addr, src_addr, src_stride * rows);
2226  } else {
2227  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2228 
2229  for (j = 0; j < rows; j++) {
2230  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2231  }
2232  }
2233  }
2234 
2235  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2236  if (status) {
2237  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2238  return AVERROR_EXTERNAL;
2239  }
2240 
2241  return 0;
2242 }
2243 
2244 static int create_cv_pixel_buffer(AVCodecContext *avctx,
2245  const AVFrame *frame,
2246  CVPixelBufferRef *cv_img)
2247 {
2248  int plane_count;
2249  int color;
2250  size_t widths [AV_NUM_DATA_POINTERS];
2251  size_t heights[AV_NUM_DATA_POINTERS];
2252  size_t strides[AV_NUM_DATA_POINTERS];
2253  int status;
2254  size_t contiguous_buf_size;
2255  CVPixelBufferPoolRef pix_buf_pool;
2256  VTEncContext* vtctx = avctx->priv_data;
2257 
2258  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2259  av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2260 
2261  *cv_img = (CVPixelBufferRef)frame->data[3];
2262  av_assert0(*cv_img);
2263 
2264  CFRetain(*cv_img);
2265  return 0;
2266  }
2267 
2268  memset(widths, 0, sizeof(widths));
2269  memset(heights, 0, sizeof(heights));
2270  memset(strides, 0, sizeof(strides));
2271 
2272  status = get_cv_pixel_info(
2273  avctx,
2274  frame,
2275  &color,
2276  &plane_count,
2277  widths,
2278  heights,
2279  strides,
2280  &contiguous_buf_size
2281  );
2282 
2283  if (status) {
2284  av_log(
2285  avctx,
2286  AV_LOG_ERROR,
2287  "Error: Cannot convert format %d color_range %d: %d\n",
2288  frame->format,
2289  frame->color_range,
2290  status
2291  );
2292 
2293  return AVERROR_EXTERNAL;
2294  }
2295 
2296  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2297  if (!pix_buf_pool) {
2298  /* On iOS, the VT session is invalidated when the APP switches from
2299  * foreground to background and vice versa. Fetch the actual error code
2300  * of the VT session to detect that case and restart the VT session
2301  * accordingly. */
2302  OSStatus vtstatus;
2303 
2304  vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2305  if (vtstatus == kVTInvalidSessionErr) {
2306  CFRelease(vtctx->session);
2307  vtctx->session = NULL;
2308  status = vtenc_configure_encoder(avctx);
2309  if (status == 0)
2310  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2311  }
2312  if (!pix_buf_pool) {
2313  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2314  return AVERROR_EXTERNAL;
2315  }
2316  else
2317  av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2318  "kVTInvalidSessionErr error.\n");
2319  }
2320 
2321  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2322  pix_buf_pool,
2323  cv_img);
2324 
2325 
2326  if (status) {
2327  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2328  return AVERROR_EXTERNAL;
2329  }
2330 
2331  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2332  if (status) {
2333  CFRelease(*cv_img);
2334  *cv_img = NULL;
2335  return status;
2336  }
2337 
2338  return 0;
2339 }
2340 
2341 static int create_encoder_dict_h264(const AVFrame *frame,
2342  CFDictionaryRef* dict_out)
2343 {
2344  CFDictionaryRef dict = NULL;
2345  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2346  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2347  const void *vals[] = { kCFBooleanTrue };
2348 
2349  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2350  if(!dict) return AVERROR(ENOMEM);
2351  }
2352 
2353  *dict_out = dict;
2354  return 0;
2355 }
2356 
2357 static int vtenc_send_frame(AVCodecContext *avctx,
2358  VTEncContext *vtctx,
2359  const AVFrame *frame)
2360 {
2361  CMTime time;
2362  CFDictionaryRef frame_dict;
2363  CVPixelBufferRef cv_img = NULL;
2364  AVFrameSideData *side_data = NULL;
2365  ExtraSEI *sei = NULL;
2366  int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2367 
2368  if (status) return status;
2369 
2370  status = create_encoder_dict_h264(frame, &frame_dict);
2371  if (status) {
2372  CFRelease(cv_img);
2373  return status;
2374  }
2375 
2376  side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2377  if (vtctx->a53_cc && side_data && side_data->size) {
2378  sei = av_mallocz(sizeof(*sei));
2379  if (!sei) {
2380  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2381  } else {
2382  int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2383  if (ret < 0) {
2384  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2385  av_free(sei);
2386  sei = NULL;
2387  }
2388  }
2389  }
2390 
2391  time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2392  status = VTCompressionSessionEncodeFrame(
2393  vtctx->session,
2394  cv_img,
2395  time,
2396  kCMTimeInvalid,
2397  frame_dict,
2398  sei,
2399  NULL
2400  );
2401 
2402  if (frame_dict) CFRelease(frame_dict);
2403  CFRelease(cv_img);
2404 
2405  if (status) {
2406  av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2407  return AVERROR_EXTERNAL;
2408  }
2409 
2410  return 0;
2411 }
2412 
2413 static av_cold int vtenc_frame(
2414  AVCodecContext *avctx,
2415  AVPacket *pkt,
2416  const AVFrame *frame,
2417  int *got_packet)
2418 {
2419  VTEncContext *vtctx = avctx->priv_data;
2420  bool get_frame;
2421  int status;
2422  CMSampleBufferRef buf = NULL;
2423  ExtraSEI *sei = NULL;
2424 
2425  if (frame) {
2426  status = vtenc_send_frame(avctx, vtctx, frame);
2427 
2428  if (status) {
2429  status = AVERROR_EXTERNAL;
2430  goto end_nopkt;
2431  }
2432 
2433  if (vtctx->frame_ct_in == 0) {
2434  vtctx->first_pts = frame->pts;
2435  } else if(vtctx->frame_ct_in == vtctx->has_b_frames) {
2436  vtctx->dts_delta = frame->pts - vtctx->first_pts;
2437  }
2438 
2439  vtctx->frame_ct_in++;
2440  } else if(!vtctx->flushing) {
2441  vtctx->flushing = true;
2442 
2443  status = VTCompressionSessionCompleteFrames(vtctx->session,
2444  kCMTimeIndefinite);
2445 
2446  if (status) {
2447  av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2448  status = AVERROR_EXTERNAL;
2449  goto end_nopkt;
2450  }
2451  }
2452 
2453  *got_packet = 0;
2454  get_frame = vtctx->dts_delta >= 0 || !frame;
2455  if (!get_frame) {
2456  status = 0;
2457  goto end_nopkt;
2458  }
2459 
2460  status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2461  if (status) goto end_nopkt;
2462  if (!buf) goto end_nopkt;
2463 
2464  status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2465  if (sei) {
2466  if (sei->data) av_free(sei->data);
2467  av_free(sei);
2468  }
2469  CFRelease(buf);
2470  if (status) goto end_nopkt;
2471 
2472  *got_packet = 1;
2473  return 0;
2474 
2475 end_nopkt:
2476  av_packet_unref(pkt);
2477  return status;
2478 }
2479 
2480 static int vtenc_populate_extradata(AVCodecContext *avctx,
2481  CMVideoCodecType codec_type,
2482  CFStringRef profile_level,
2483  CFNumberRef gamma_level,
2484  CFDictionaryRef enc_info,
2485  CFDictionaryRef pixel_buffer_info)
2486 {
2487  VTEncContext *vtctx = avctx->priv_data;
2488  int status;
2489  CVPixelBufferPoolRef pool = NULL;
2490  CVPixelBufferRef pix_buf = NULL;
2491  CMTime time;
2492  CMSampleBufferRef buf = NULL;
2493 
2494  status = vtenc_create_encoder(avctx,
2495  codec_type,
2496  profile_level,
2497  gamma_level,
2498  enc_info,
2499  pixel_buffer_info,
2500  &vtctx->session);
2501  if (status)
2502  goto pe_cleanup;
2503 
2504  pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2505  if(!pool){
2506  av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2507  goto pe_cleanup;
2508  }
2509 
2510  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2511  pool,
2512  &pix_buf);
2513 
2514  if(status != kCVReturnSuccess){
2515  av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2516  goto pe_cleanup;
2517  }
2518 
2519  time = CMTimeMake(0, avctx->time_base.den);
2520  status = VTCompressionSessionEncodeFrame(vtctx->session,
2521  pix_buf,
2522  time,
2523  kCMTimeInvalid,
2524  NULL,
2525  NULL,
2526  NULL);
2527 
2528  if (status) {
2529  av_log(avctx,
2530  AV_LOG_ERROR,
2531  "Error sending frame for extradata: %d\n",
2532  status);
2533 
2534  goto pe_cleanup;
2535  }
2536 
2537  //Populates extradata - output frames are flushed and param sets are available.
2538  status = VTCompressionSessionCompleteFrames(vtctx->session,
2539  kCMTimeIndefinite);
2540 
2541  if (status)
2542  goto pe_cleanup;
2543 
2544  status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2545  if (status) {
2546  av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2547  goto pe_cleanup;
2548  }
2549 
2550  CFRelease(buf);
2551 
2552 
2553 
2554 pe_cleanup:
2555  if(vtctx->session)
2556  CFRelease(vtctx->session);
2557 
2558  vtctx->session = NULL;
2559  vtctx->frame_ct_out = 0;
2560 
2561  av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2562 
2563  return status;
2564 }
2565 
2566 static av_cold int vtenc_close(AVCodecContext *avctx)
2567 {
2568  VTEncContext *vtctx = avctx->priv_data;
2569 
2570  if(!vtctx->session) {
2571  pthread_cond_destroy(&vtctx->cv_sample_sent);
2572  pthread_mutex_destroy(&vtctx->lock);
2573  return 0;
2574  }
2575 
2576  VTCompressionSessionCompleteFrames(vtctx->session,
2577  kCMTimeIndefinite);
2578  clear_frame_queue(vtctx);
2579  pthread_cond_destroy(&vtctx->cv_sample_sent);
2580  pthread_mutex_destroy(&vtctx->lock);
2581  CFRelease(vtctx->session);
2582  vtctx->session = NULL;
2583 
2584  if (vtctx->color_primaries) {
2585  CFRelease(vtctx->color_primaries);
2586  vtctx->color_primaries = NULL;
2587  }
2588 
2589  if (vtctx->transfer_function) {
2590  CFRelease(vtctx->transfer_function);
2591  vtctx->transfer_function = NULL;
2592  }
2593 
2594  if (vtctx->ycbcr_matrix) {
2595  CFRelease(vtctx->ycbcr_matrix);
2596  vtctx->ycbcr_matrix = NULL;
2597  }
2598 
2599  return 0;
2600 }
2601 
2602 static const enum AVPixelFormat avc_pix_fmts[] = {
2603  AV_PIX_FMT_VIDEOTOOLBOX,
2604  AV_PIX_FMT_NV12,
2605  AV_PIX_FMT_YUV420P,
2606  AV_PIX_FMT_NONE
2607 };
2608 
2609 static const enum AVPixelFormat hevc_pix_fmts[] = {
2610  AV_PIX_FMT_VIDEOTOOLBOX,
2611  AV_PIX_FMT_NV12,
2612  AV_PIX_FMT_YUV420P,
2613  AV_PIX_FMT_BGRA,
2614  AV_PIX_FMT_P010LE,
2615  AV_PIX_FMT_NONE
2616 };
2617 
2618 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2619 #define COMMON_OPTIONS \
2620  { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2621  { .i64 = 0 }, 0, 1, VE }, \
2622  { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2623  { .i64 = 0 }, 0, 1, VE }, \
2624  { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2625  OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2626  { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2627  OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2628  { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2629  OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2630 
2631 #define OFFSET(x) offsetof(VTEncContext, x)
2632 static const AVOption h264_options[] = {
2633  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2634  { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2635  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2636  { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
2637  { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2638 
2639  { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2640  { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2641  { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2642  { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2643  { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2644  { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2645  { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2646  { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2647  { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2648  { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2649  { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2650 
2651  { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2652  { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2653  { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2654  { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2655  { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2656 
2657  { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2658 
2659  COMMON_OPTIONS
2660  { NULL },
2661 };
2662 
2663 static const AVClass h264_videotoolbox_class = {
2664  .class_name = "h264_videotoolbox",
2665  .item_name = av_default_item_name,
2666  .option = h264_options,
2667  .version = LIBAVUTIL_VERSION_INT,
2668 };
2669 
2670 const AVCodec ff_h264_videotoolbox_encoder = {
2671  .name = "h264_videotoolbox",
2672  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2673  .type = AVMEDIA_TYPE_VIDEO,
2674  .id = AV_CODEC_ID_H264,
2675  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
2676  .priv_data_size = sizeof(VTEncContext),
2677  .pix_fmts = avc_pix_fmts,
2678  .init = vtenc_init,
2679  .encode2 = vtenc_frame,
2680  .close = vtenc_close,
2681  .priv_class = &h264_videotoolbox_class,
2682  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2683  FF_CODEC_CAP_INIT_CLEANUP,
2684 };
2685 
2686 static const AVOption hevc_options[] = {
2687  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2688  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2689  { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2690 
2691  { "alpha_quality", "Compression quality for the alpha channel", OFFSET(alpha_quality), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0.0, 1.0, VE },
2692 
2693  COMMON_OPTIONS
2694  { NULL },
2695 };
2696 
2697 static const AVClass hevc_videotoolbox_class = {
2698  .class_name = "hevc_videotoolbox",
2699  .item_name = av_default_item_name,
2700  .option = hevc_options,
2701  .version = LIBAVUTIL_VERSION_INT,
2702 };
2703 
2704 const AVCodec ff_hevc_videotoolbox_encoder = {
2705  .name = "hevc_videotoolbox",
2706  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2707  .type = AVMEDIA_TYPE_VIDEO,
2708  .id = AV_CODEC_ID_HEVC,
2709  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
2710  AV_CODEC_CAP_HARDWARE,
2711  .priv_data_size = sizeof(VTEncContext),
2712  .pix_fmts = hevc_pix_fmts,
2713  .init = vtenc_init,
2714  .encode2 = vtenc_frame,
2715  .close = vtenc_close,
2716  .priv_class = &hevc_videotoolbox_class,
2717  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2718  FF_CODEC_CAP_INIT_CLEANUP,
2719  .wrapper_name = "videotoolbox",
2720 };
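
The two AVCodec entries above are what applications reach through the regular libavcodec API. The sketch below shows one plausible way for client code to open the H.264 VideoToolbox encoder; it is not part of videotoolboxenc.c, and the helper name open_vt_h264 as well as the frame rate and bitrate values are illustrative assumptions.

/* Illustrative client-side sketch, not part of videotoolboxenc.c. */
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int open_vt_h264(AVCodecContext **out, int width, int height)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("h264_videotoolbox");
    AVCodecContext *c;
    int ret;

    if (!codec) /* FFmpeg was built without VideoToolbox support */
        return AVERROR_ENCODER_NOT_FOUND;

    c = avcodec_alloc_context3(codec);
    if (!c)
        return AVERROR(ENOMEM);

    c->width     = width;
    c->height    = height;
    c->pix_fmt   = AV_PIX_FMT_NV12;        /* one of avc_pix_fmts above */
    c->time_base = (AVRational){ 1, 30 };  /* used by the pts/dts math in vtenc_cm_to_avpacket() */
    c->bit_rate  = 4000000;                /* illustrative value */

    /* "allow_sw" is one of the private options declared via COMMON_OPTIONS. */
    av_opt_set_int(c->priv_data, "allow_sw", 1, 0);

    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        avcodec_free_context(&c);
        return ret;
    }

    *out = c;
    return 0;
}

Frames are then submitted with avcodec_send_frame() and packets drained with avcodec_receive_packet(), which reach vtenc_frame() above through the generic encode path.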