FFmpeg: videotoolboxenc.c
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "atsc_a53.h"
35 #include "encode.h"
36 #include "h264.h"
37 #include "h264_sei.h"
38 #include <dlfcn.h>
39 
40 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
41 enum { kCMVideoCodecType_HEVC = 'hvc1' };
42 #endif
43 
44 #if !HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA
45 enum { kCMVideoCodecType_HEVCWithAlpha = 'muxa' };
46 #endif
47 
48 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
49 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange  = 'xf20' };
50 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
51 #endif
52 
53 #ifndef TARGET_CPU_ARM64
54 # define TARGET_CPU_ARM64 0
55 #endif
56 
57 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
58  size_t parameterSetIndex,
59  const uint8_t **parameterSetPointerOut,
60  size_t *parameterSetSizeOut,
61  size_t *parameterSetCountOut,
62  int *NALUnitHeaderLengthOut);
63 
64 //These symbols may not be present
65 static struct{
66     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
67     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
68     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
69 
70     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
71     CFStringRef kVTH264EntropyMode_CAVLC;
72     CFStringRef kVTH264EntropyMode_CABAC;
73 
74     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
75     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
76     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
77     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
78     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
79     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
80     CFStringRef kVTProfileLevel_H264_Main_4_2;
81     CFStringRef kVTProfileLevel_H264_Main_5_1;
82     CFStringRef kVTProfileLevel_H264_Main_5_2;
83     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
84     CFStringRef kVTProfileLevel_H264_High_3_0;
85     CFStringRef kVTProfileLevel_H264_High_3_1;
86     CFStringRef kVTProfileLevel_H264_High_3_2;
87     CFStringRef kVTProfileLevel_H264_High_4_0;
88     CFStringRef kVTProfileLevel_H264_High_4_1;
89     CFStringRef kVTProfileLevel_H264_High_4_2;
90     CFStringRef kVTProfileLevel_H264_High_5_1;
91     CFStringRef kVTProfileLevel_H264_High_5_2;
92     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
93     CFStringRef kVTProfileLevel_H264_Extended_5_0;
94     CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
95 
96     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
97     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
98 
99     CFStringRef kVTCompressionPropertyKey_RealTime;
100     CFStringRef kVTCompressionPropertyKey_TargetQualityForAlpha;
101 
102     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
103     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
104 
105     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
106 } compat_keys;
107 
108 #define GET_SYM(symbol, defaultVal) \
109 do{ \
110  CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
111  if(!handle) \
112  compat_keys.symbol = CFSTR(defaultVal); \
113  else \
114  compat_keys.symbol = *handle; \
115 }while(0)
116 
117 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
118 
119 static void loadVTEncSymbols(){
120  compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
121  (getParameterSetAtIndex)dlsym(
122  RTLD_DEFAULT,
123  "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
124  );
125 
126     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
127     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
128     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
129 
130     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
131     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
132     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
133 
134  GET_SYM(kVTProfileLevel_H264_Baseline_4_0, "H264_Baseline_4_0");
135  GET_SYM(kVTProfileLevel_H264_Baseline_4_2, "H264_Baseline_4_2");
136  GET_SYM(kVTProfileLevel_H264_Baseline_5_0, "H264_Baseline_5_0");
137  GET_SYM(kVTProfileLevel_H264_Baseline_5_1, "H264_Baseline_5_1");
138  GET_SYM(kVTProfileLevel_H264_Baseline_5_2, "H264_Baseline_5_2");
139  GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
140  GET_SYM(kVTProfileLevel_H264_Main_4_2, "H264_Main_4_2");
141  GET_SYM(kVTProfileLevel_H264_Main_5_1, "H264_Main_5_1");
142  GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2");
143  GET_SYM(kVTProfileLevel_H264_Main_AutoLevel, "H264_Main_AutoLevel");
144  GET_SYM(kVTProfileLevel_H264_High_3_0, "H264_High_3_0");
145  GET_SYM(kVTProfileLevel_H264_High_3_1, "H264_High_3_1");
146  GET_SYM(kVTProfileLevel_H264_High_3_2, "H264_High_3_2");
147  GET_SYM(kVTProfileLevel_H264_High_4_0, "H264_High_4_0");
148  GET_SYM(kVTProfileLevel_H264_High_4_1, "H264_High_4_1");
149  GET_SYM(kVTProfileLevel_H264_High_4_2, "H264_High_4_2");
150  GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
151  GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
152  GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
153  GET_SYM(kVTProfileLevel_H264_Extended_5_0, "H264_Extended_5_0");
154  GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
155 
156  GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
157  GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
158 
159     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
160     GET_SYM(kVTCompressionPropertyKey_TargetQualityForAlpha,
161             "TargetQualityForAlpha");
162 
163     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
164             "EnableHardwareAcceleratedVideoEncoder");
165     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
166             "RequireHardwareAcceleratedVideoEncoder");
167 }
168 
169 typedef enum VT_H264Profile {
170     H264_PROF_AUTO,
171     H264_PROF_BASELINE,
172     H264_PROF_MAIN,
173     H264_PROF_HIGH,
174     H264_PROF_EXTENDED,
175     H264_PROF_COUNT
176 } VT_H264Profile;
177 
178 typedef enum VTH264Entropy{
179     VT_ENTROPY_NOT_SET,
180     VT_CAVLC,
181     VT_CABAC
182 } VTH264Entropy;
183 
184 typedef enum VT_HEVCProfile {
185     HEVC_PROF_AUTO,
186     HEVC_PROF_MAIN,
187     HEVC_PROF_MAIN10,
188     HEVC_PROF_COUNT
189 } VT_HEVCProfile;
190 
191 static const uint8_t start_code[] = { 0, 0, 0, 1 };
192 
193 typedef struct ExtraSEI {
194  void *data;
195  size_t size;
196 } ExtraSEI;
197 
198 typedef struct BufNode {
199  CMSampleBufferRef cm_buffer;
200     ExtraSEI *sei;
201     struct BufNode* next;
202  int error;
203 } BufNode;
204 
205 typedef struct VTEncContext {
206     AVClass *class;
207     enum AVCodecID codec_id;
208     VTCompressionSessionRef session;
209  CFStringRef ycbcr_matrix;
210  CFStringRef color_primaries;
211     CFStringRef transfer_function;
212     getParameterSetAtIndex get_param_set_func;
213 
214     pthread_mutex_t lock;
215     pthread_cond_t  cv_sample_sent;
216 
217     int async_error;
218 
219     BufNode *q_head;
220     BufNode *q_tail;
221 
222  int64_t frame_ct_out;
223  int64_t frame_ct_in;
224 
225  int64_t first_pts;
226  int64_t dts_delta;
227 
228  int64_t profile;
229  int64_t level;
230  int64_t entropy;
231  int64_t realtime;
232  int64_t frames_before;
233  int64_t frames_after;
234 
235  int64_t allow_sw;
236  int64_t require_sw;
237     double alpha_quality;
238 
239  bool flushing;
240     int has_b_frames;
241     bool warned_color_range;
242 
243  /* can't be bool type since AVOption will access it as int */
244  int a53_cc;
245 } VTEncContext;
246 
247 static int vtenc_populate_extradata(AVCodecContext *avctx,
248  CMVideoCodecType codec_type,
249  CFStringRef profile_level,
250  CFNumberRef gamma_level,
251  CFDictionaryRef enc_info,
252  CFDictionaryRef pixel_buffer_info);
253 
254 /**
255  * NULL-safe release of *refPtr, and sets value to NULL.
256  */
257 static void vt_release_num(CFNumberRef* refPtr){
258  if (!*refPtr) {
259  return;
260  }
261 
262  CFRelease(*refPtr);
263  *refPtr = NULL;
264 }
265 
266 static void set_async_error(VTEncContext *vtctx, int err)
267 {
268  BufNode *info;
269 
270  pthread_mutex_lock(&vtctx->lock);
271 
272  vtctx->async_error = err;
273 
274  info = vtctx->q_head;
275  vtctx->q_head = vtctx->q_tail = NULL;
276 
277  while (info) {
278  BufNode *next = info->next;
279  CFRelease(info->cm_buffer);
280  av_free(info);
281  info = next;
282  }
283 
284  pthread_mutex_unlock(&vtctx->lock);
285 }
286 
287 static void clear_frame_queue(VTEncContext *vtctx)
288 {
289  set_async_error(vtctx, 0);
290 }
291 
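/* Pop the next encoded sample off the output queue. When wait is true this
 * blocks on cv_sample_sent until a sample arrives, an async error is set, or
 * the encoder is flushing with no frames outstanding. */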
292 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
293 {
294  BufNode *info;
295 
296  pthread_mutex_lock(&vtctx->lock);
297 
298  if (vtctx->async_error) {
299  pthread_mutex_unlock(&vtctx->lock);
300  return vtctx->async_error;
301  }
302 
303  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
304  *buf = NULL;
305 
306  pthread_mutex_unlock(&vtctx->lock);
307  return 0;
308  }
309 
310  while (!vtctx->q_head && !vtctx->async_error && wait && !vtctx->flushing) {
311  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
312  }
313 
314  if (!vtctx->q_head) {
315  pthread_mutex_unlock(&vtctx->lock);
316  *buf = NULL;
317  return 0;
318  }
319 
320  info = vtctx->q_head;
321  vtctx->q_head = vtctx->q_head->next;
322  if (!vtctx->q_head) {
323  vtctx->q_tail = NULL;
324  }
325 
326  vtctx->frame_ct_out++;
327  pthread_mutex_unlock(&vtctx->lock);
328 
329  *buf = info->cm_buffer;
330  if (sei && *buf) {
331  *sei = info->sei;
332  } else if (info->sei) {
333  if (info->sei->data) av_free(info->sei->data);
334  av_free(info->sei);
335  }
336  av_free(info);
337 
338 
339  return 0;
340 }
341 
342 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
343 {
344  BufNode *info = av_malloc(sizeof(BufNode));
345  if (!info) {
346  set_async_error(vtctx, AVERROR(ENOMEM));
347  return;
348  }
349 
350  CFRetain(buffer);
351  info->cm_buffer = buffer;
352  info->sei = sei;
353  info->next = NULL;
354 
355  pthread_mutex_lock(&vtctx->lock);
356 
357  if (!vtctx->q_head) {
358  vtctx->q_head = info;
359  } else {
360  vtctx->q_tail->next = info;
361  }
362 
363  vtctx->q_tail = info;
364 
365     pthread_cond_signal(&vtctx->cv_sample_sent);
366     pthread_mutex_unlock(&vtctx->lock);
367 }
368 
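/* Count the NAL units in an AVCC-style sample buffer by walking its
 * length-prefixed boxes. */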
369 static int count_nalus(size_t length_code_size,
370  CMSampleBufferRef sample_buffer,
371  int *count)
372 {
373  size_t offset = 0;
374  int status;
375  int nalu_ct = 0;
376  uint8_t size_buf[4];
377  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
378  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
379 
380  if (length_code_size > 4)
381  return AVERROR_INVALIDDATA;
382 
383  while (offset < src_size) {
384  size_t curr_src_len;
385  size_t box_len = 0;
386  size_t i;
387 
388  status = CMBlockBufferCopyDataBytes(block,
389  offset,
390  length_code_size,
391  size_buf);
392 
393  for (i = 0; i < length_code_size; i++) {
394  box_len <<= 8;
395  box_len |= size_buf[i];
396  }
397 
398  curr_src_len = box_len + length_code_size;
399  offset += curr_src_len;
400 
401  nalu_ct++;
402  }
403 
404  *count = nalu_ct;
405  return 0;
406 }
407 
408 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id,
409  enum AVPixelFormat fmt,
410  double alpha_quality)
411 {
412  switch (id) {
413  case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
414  case AV_CODEC_ID_HEVC:
415  if (fmt == AV_PIX_FMT_BGRA && alpha_quality > 0.0) {
416             return kCMVideoCodecType_HEVCWithAlpha;
417         }
418  return kCMVideoCodecType_HEVC;
419  default: return 0;
420  }
421 }
422 
423 /**
424  * Get the combined size of the parameter sets (e.g. SPS/PPS) stored in the
425  * format description of a CMSampleBufferRef, counting a start code before
426  * each parameter set.
427  *
428  * @param vid_fmt format description to query
429  * @param size    set to the number of bytes needed to store all parameter sets
430  */
431 static int get_params_size(
432  AVCodecContext *avctx,
433  CMVideoFormatDescriptionRef vid_fmt,
434  size_t *size)
435 {
436  VTEncContext *vtctx = avctx->priv_data;
437  size_t total_size = 0;
438  size_t ps_count;
439  int is_count_bad = 0;
440  size_t i;
441  int status;
442  status = vtctx->get_param_set_func(vid_fmt,
443  0,
444  NULL,
445  NULL,
446  &ps_count,
447  NULL);
448  if (status) {
449  is_count_bad = 1;
450  ps_count = 0;
451  status = 0;
452  }
453 
454  for (i = 0; i < ps_count || is_count_bad; i++) {
455  const uint8_t *ps;
456  size_t ps_size;
457  status = vtctx->get_param_set_func(vid_fmt,
458  i,
459  &ps,
460  &ps_size,
461  NULL,
462  NULL);
463  if (status) {
464  /*
465  * When ps_count is invalid, status != 0 ends the loop normally
466  * unless we didn't get any parameter sets.
467  */
468  if (i > 0 && is_count_bad) status = 0;
469 
470  break;
471  }
472 
473  total_size += ps_size + sizeof(start_code);
474  }
475 
476  if (status) {
477  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
478  return AVERROR_EXTERNAL;
479  }
480 
481  *size = total_size;
482  return 0;
483 }
484 
485 static int copy_param_sets(
486  AVCodecContext *avctx,
487  CMVideoFormatDescriptionRef vid_fmt,
488  uint8_t *dst,
489  size_t dst_size)
490 {
491  VTEncContext *vtctx = avctx->priv_data;
492  size_t ps_count;
493  int is_count_bad = 0;
494  int status;
495  size_t offset = 0;
496  size_t i;
497 
498  status = vtctx->get_param_set_func(vid_fmt,
499  0,
500  NULL,
501  NULL,
502  &ps_count,
503  NULL);
504  if (status) {
505  is_count_bad = 1;
506  ps_count = 0;
507  status = 0;
508  }
509 
510 
511  for (i = 0; i < ps_count || is_count_bad; i++) {
512  const uint8_t *ps;
513  size_t ps_size;
514  size_t next_offset;
515 
516  status = vtctx->get_param_set_func(vid_fmt,
517  i,
518  &ps,
519  &ps_size,
520  NULL,
521  NULL);
522  if (status) {
523  if (i > 0 && is_count_bad) status = 0;
524 
525  break;
526  }
527 
528  next_offset = offset + sizeof(start_code) + ps_size;
529  if (dst_size < next_offset) {
530  av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
531             return AVERROR_BUFFER_TOO_SMALL;
532         }
533 
534  memcpy(dst + offset, start_code, sizeof(start_code));
535  offset += sizeof(start_code);
536 
537  memcpy(dst + offset, ps, ps_size);
538  offset = next_offset;
539  }
540 
541  if (status) {
542  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
543  return AVERROR_EXTERNAL;
544  }
545 
546  return 0;
547 }
548 
549 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
550 {
551  CMVideoFormatDescriptionRef vid_fmt;
552  size_t total_size;
553  int status;
554 
555  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
556  if (!vid_fmt) {
557  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
558  return AVERROR_EXTERNAL;
559  }
560 
561  status = get_params_size(avctx, vid_fmt, &total_size);
562  if (status) {
563  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
564  return status;
565  }
566 
567  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
568  if (!avctx->extradata) {
569  return AVERROR(ENOMEM);
570  }
571  avctx->extradata_size = total_size;
572 
573  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
574 
575  if (status) {
576  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
577  return status;
578  }
579 
580  return 0;
581 }
582 
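/* Called by VideoToolbox for every encoded frame: queues the sample buffer
 * (plus any attached SEI payload) for vtenc_q_pop, or records an async error. */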
583 static void vtenc_output_callback(
584     void *ctx,
585  void *sourceFrameCtx,
586  OSStatus status,
587  VTEncodeInfoFlags flags,
588  CMSampleBufferRef sample_buffer)
589 {
590  AVCodecContext *avctx = ctx;
591  VTEncContext *vtctx = avctx->priv_data;
592  ExtraSEI *sei = sourceFrameCtx;
593 
594  if (vtctx->async_error) {
595  return;
596  }
597 
598  if (status) {
599  av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
600         set_async_error(vtctx, AVERROR_EXTERNAL);
601         return;
602  }
603 
604  if (!sample_buffer) {
605  return;
606  }
607 
608  if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
609  int set_status = set_extradata(avctx, sample_buffer);
610  if (set_status) {
611  set_async_error(vtctx, set_status);
612  return;
613  }
614  }
615 
616  vtenc_q_push(vtctx, sample_buffer, sei);
617 }
618 
619 static int get_length_code_size(
620     AVCodecContext    *avctx,
621  CMSampleBufferRef sample_buffer,
622  size_t *size)
623 {
624  VTEncContext *vtctx = avctx->priv_data;
625  CMVideoFormatDescriptionRef vid_fmt;
626  int isize;
627  int status;
628 
629  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
630  if (!vid_fmt) {
631  av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
632  return AVERROR_EXTERNAL;
633  }
634 
635  status = vtctx->get_param_set_func(vid_fmt,
636  0,
637  NULL,
638  NULL,
639  NULL,
640  &isize);
641  if (status) {
642  av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
643  return AVERROR_EXTERNAL;
644  }
645 
646  *size = isize;
647  return 0;
648 }
649 
650 /*
651  * Returns true on success.
652  *
653  * If profile_level_val is NULL and this method returns true, don't specify the
654  * profile/level to the encoder.
655  */
656 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
657                                       CFStringRef    *profile_level_val)
658 {
659  VTEncContext *vtctx = avctx->priv_data;
660  int64_t profile = vtctx->profile;
661 
662  if (profile == H264_PROF_AUTO && vtctx->level) {
663  //Need to pick a profile if level is not auto-selected.
664         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
665     }
666 
667  *profile_level_val = NULL;
668 
669  switch (profile) {
670  case H264_PROF_AUTO:
671  return true;
672 
673  case H264_PROF_BASELINE:
674  switch (vtctx->level) {
675  case 0: *profile_level_val =
676  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
677  case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
678  case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
679  case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
680  case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
681  case 40: *profile_level_val =
682  compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
683  case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
684  case 42: *profile_level_val =
685  compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
686  case 50: *profile_level_val =
687  compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
688  case 51: *profile_level_val =
689  compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
690  case 52: *profile_level_val =
691  compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
692  }
693  break;
694 
695  case H264_PROF_MAIN:
696  switch (vtctx->level) {
697  case 0: *profile_level_val =
698  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
699  case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
700  case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
701  case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
702  case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
703  case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
704  case 42: *profile_level_val =
705  compat_keys.kVTProfileLevel_H264_Main_4_2; break;
706  case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
707  case 51: *profile_level_val =
708  compat_keys.kVTProfileLevel_H264_Main_5_1; break;
709  case 52: *profile_level_val =
710  compat_keys.kVTProfileLevel_H264_Main_5_2; break;
711  }
712  break;
713 
714  case H264_PROF_HIGH:
715  switch (vtctx->level) {
716  case 0: *profile_level_val =
717  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
718  case 30: *profile_level_val =
719  compat_keys.kVTProfileLevel_H264_High_3_0; break;
720  case 31: *profile_level_val =
721  compat_keys.kVTProfileLevel_H264_High_3_1; break;
722  case 32: *profile_level_val =
723  compat_keys.kVTProfileLevel_H264_High_3_2; break;
724  case 40: *profile_level_val =
725  compat_keys.kVTProfileLevel_H264_High_4_0; break;
726  case 41: *profile_level_val =
727  compat_keys.kVTProfileLevel_H264_High_4_1; break;
728  case 42: *profile_level_val =
729  compat_keys.kVTProfileLevel_H264_High_4_2; break;
730  case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
731  case 51: *profile_level_val =
732  compat_keys.kVTProfileLevel_H264_High_5_1; break;
733  case 52: *profile_level_val =
734  compat_keys.kVTProfileLevel_H264_High_5_2; break;
735  }
736  break;
737  case H264_PROF_EXTENDED:
738  switch (vtctx->level) {
739  case 0: *profile_level_val =
740  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
741  case 50: *profile_level_val =
742  compat_keys.kVTProfileLevel_H264_Extended_5_0; break;
743  }
744  break;
745  }
746 
747  if (!*profile_level_val) {
748  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
749  return false;
750  }
751 
752  return true;
753 }
754 
755 /*
756  * Returns true on success.
757  *
758  * If profile_level_val is NULL and this method returns true, don't specify the
759  * profile/level to the encoder.
760  */
761 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
762                                       CFStringRef    *profile_level_val)
763 {
764  VTEncContext *vtctx = avctx->priv_data;
765  int64_t profile = vtctx->profile;
766 
767  *profile_level_val = NULL;
768 
769  switch (profile) {
770  case HEVC_PROF_AUTO:
771  return true;
772  case HEVC_PROF_MAIN:
773  *profile_level_val =
774  compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
775  break;
776  case HEVC_PROF_MAIN10:
777  *profile_level_val =
778  compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
779  break;
780  }
781 
782  if (!*profile_level_val) {
783  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
784  return false;
785  }
786 
787  return true;
788 }
789 
790 static int get_cv_pixel_format(AVCodecContext* avctx,
791                                enum AVPixelFormat fmt,
792  enum AVColorRange range,
793  int* av_pixel_format,
794  int* range_guessed)
795 {
796  if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
797  range != AVCOL_RANGE_JPEG;
798 
799  //MPEG range is used when no range is set
800  if (fmt == AV_PIX_FMT_NV12) {
801  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
802  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
803  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
804  } else if (fmt == AV_PIX_FMT_YUV420P) {
805  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
806  kCVPixelFormatType_420YpCbCr8PlanarFullRange :
807  kCVPixelFormatType_420YpCbCr8Planar;
808  } else if (fmt == AV_PIX_FMT_BGRA) {
809  *av_pixel_format = kCVPixelFormatType_32BGRA;
810  } else if (fmt == AV_PIX_FMT_P010LE) {
811  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
812             kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
813             kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
814     } else {
815  return AVERROR(EINVAL);
816  }
817 
818  return 0;
819 }
820 
821 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
822  VTEncContext *vtctx = avctx->priv_data;
823 
824  if (vtctx->color_primaries) {
825  CFDictionarySetValue(dict,
826  kCVImageBufferColorPrimariesKey,
827  vtctx->color_primaries);
828  }
829 
830  if (vtctx->transfer_function) {
831  CFDictionarySetValue(dict,
832  kCVImageBufferTransferFunctionKey,
833  vtctx->transfer_function);
834  }
835 
836  if (vtctx->ycbcr_matrix) {
837  CFDictionarySetValue(dict,
838  kCVImageBufferYCbCrMatrixKey,
839  vtctx->ycbcr_matrix);
840  }
841 }
842 
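/* Build the CVPixelBuffer attributes dictionary (pixel format, width, height
 * and color tags) that describes the source frames given to the session. */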
843 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
844                                        CFMutableDictionaryRef* dict)
845 {
846  CFNumberRef cv_color_format_num = NULL;
847  CFNumberRef width_num = NULL;
848  CFNumberRef height_num = NULL;
849  CFMutableDictionaryRef pixel_buffer_info = NULL;
850  int cv_color_format;
851  int status = get_cv_pixel_format(avctx,
852  avctx->pix_fmt,
853  avctx->color_range,
854  &cv_color_format,
855  NULL);
856  if (status) return status;
857 
858  pixel_buffer_info = CFDictionaryCreateMutable(
859  kCFAllocatorDefault,
860  20,
861  &kCFCopyStringDictionaryKeyCallBacks,
862  &kCFTypeDictionaryValueCallBacks);
863 
864  if (!pixel_buffer_info) goto pbinfo_nomem;
865 
866  cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
867  kCFNumberSInt32Type,
868  &cv_color_format);
869  if (!cv_color_format_num) goto pbinfo_nomem;
870 
871  CFDictionarySetValue(pixel_buffer_info,
872  kCVPixelBufferPixelFormatTypeKey,
873  cv_color_format_num);
874  vt_release_num(&cv_color_format_num);
875 
876  width_num = CFNumberCreate(kCFAllocatorDefault,
877  kCFNumberSInt32Type,
878  &avctx->width);
879  if (!width_num) return AVERROR(ENOMEM);
880 
881  CFDictionarySetValue(pixel_buffer_info,
882  kCVPixelBufferWidthKey,
883  width_num);
884  vt_release_num(&width_num);
885 
886  height_num = CFNumberCreate(kCFAllocatorDefault,
887  kCFNumberSInt32Type,
888  &avctx->height);
889  if (!height_num) goto pbinfo_nomem;
890 
891  CFDictionarySetValue(pixel_buffer_info,
892  kCVPixelBufferHeightKey,
893  height_num);
894  vt_release_num(&height_num);
895 
896  add_color_attr(avctx, pixel_buffer_info);
897 
898  *dict = pixel_buffer_info;
899  return 0;
900 
901 pbinfo_nomem:
902  vt_release_num(&cv_color_format_num);
903  vt_release_num(&width_num);
904  vt_release_num(&height_num);
905  if (pixel_buffer_info) CFRelease(pixel_buffer_info);
906 
907  return AVERROR(ENOMEM);
908 }
909 
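/* Map avctx->color_primaries to the matching CoreVideo color primaries
 * constant, or NULL when unspecified. */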
910 static int get_cv_color_primaries(AVCodecContext *avctx,
911                                   CFStringRef *primaries)
912 {
913  enum AVColorPrimaries pri = avctx->color_primaries;
914  switch (pri) {
915     case AVCOL_PRI_UNSPECIFIED:
916         *primaries = NULL;
917  break;
918 
919  case AVCOL_PRI_BT470BG:
920  *primaries = kCVImageBufferColorPrimaries_EBU_3213;
921  break;
922 
923  case AVCOL_PRI_SMPTE170M:
924  *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
925  break;
926 
927  case AVCOL_PRI_BT709:
928  *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
929  break;
930 
931  case AVCOL_PRI_BT2020:
932  *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
933  break;
934 
935  default:
936  av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
937  *primaries = NULL;
938  return -1;
939  }
940 
941  return 0;
942 }
943 
944 static int get_cv_transfer_function(AVCodecContext *avctx,
945                                     CFStringRef *transfer_fnc,
946  CFNumberRef *gamma_level)
947 {
948  enum AVColorTransferCharacteristic trc = avctx->color_trc;
949  Float32 gamma;
950  *gamma_level = NULL;
951 
952  switch (trc) {
953         case AVCOL_TRC_UNSPECIFIED:
954             *transfer_fnc = NULL;
955  break;
956 
957  case AVCOL_TRC_BT709:
958  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
959  break;
960 
961  case AVCOL_TRC_SMPTE240M:
962  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
963  break;
964 
965 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
966  case AVCOL_TRC_SMPTE2084:
967  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
968  break;
969 #endif
970 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
971  case AVCOL_TRC_LINEAR:
972  *transfer_fnc = kCVImageBufferTransferFunction_Linear;
973  break;
974 #endif
975 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
976         case AVCOL_TRC_ARIB_STD_B67:
977             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
978  break;
979 #endif
980 
981  case AVCOL_TRC_GAMMA22:
982  gamma = 2.2;
983  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
984  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
985  break;
986 
987  case AVCOL_TRC_GAMMA28:
988  gamma = 2.8;
989  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
990  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
991  break;
992 
993  case AVCOL_TRC_BT2020_10:
994  case AVCOL_TRC_BT2020_12:
995  *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
996  break;
997 
998  default:
999  *transfer_fnc = NULL;
1000  av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
1001  return -1;
1002  }
1003 
1004  return 0;
1005 }
1006 
1007 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
1008  switch(avctx->colorspace) {
1009  case AVCOL_SPC_BT709:
1010  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
1011  break;
1012 
1013  case AVCOL_SPC_UNSPECIFIED:
1014  *matrix = NULL;
1015  break;
1016 
1017  case AVCOL_SPC_BT470BG:
1018  case AVCOL_SPC_SMPTE170M:
1019  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
1020  break;
1021 
1022  case AVCOL_SPC_SMPTE240M:
1023  *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
1024  break;
1025 
1026  case AVCOL_SPC_BT2020_NCL:
1027  *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
1028  break;
1029 
1030  default:
1031  av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
1032  return -1;
1033  }
1034 
1035  return 0;
1036 }
1037 
1038 // constant quality only on Macs with Apple Silicon
1039 static bool vtenc_qscale_enabled(void)
1040 {
1041  return !TARGET_OS_IPHONE && TARGET_CPU_ARM64;
1042 }
1043 
1044 static int vtenc_create_encoder(AVCodecContext   *avctx,
1045                                 CMVideoCodecType codec_type,
1046  CFStringRef profile_level,
1047  CFNumberRef gamma_level,
1048  CFDictionaryRef enc_info,
1049  CFDictionaryRef pixel_buffer_info,
1050  VTCompressionSessionRef *session)
1051 {
1052  VTEncContext *vtctx = avctx->priv_data;
1053  SInt32 bit_rate = avctx->bit_rate;
1054  SInt32 max_rate = avctx->rc_max_rate;
1055  Float32 quality = avctx->global_quality / FF_QP2LAMBDA;
1056  CFNumberRef bit_rate_num;
1057  CFNumberRef quality_num;
1058  CFNumberRef bytes_per_second;
1059  CFNumberRef one_second;
1060  CFArrayRef data_rate_limits;
1061  int64_t bytes_per_second_value = 0;
1062  int64_t one_second_value = 0;
1063  void *nums[2];
1064 
1065  int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1066  avctx->width,
1067  avctx->height,
1068  codec_type,
1069  enc_info,
1070  pixel_buffer_info,
1071  kCFAllocatorDefault,
1072                                             vtenc_output_callback,
1073                                             avctx,
1074  session);
1075 
1076  if (status || !vtctx->session) {
1077  av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1078 
1079 #if !TARGET_OS_IPHONE
1080  if (!vtctx->allow_sw) {
1081  av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1082  }
1083 #endif
1084 
1085  return AVERROR_EXTERNAL;
1086  }
1087 
1088  if (avctx->flags & AV_CODEC_FLAG_QSCALE && !vtenc_qscale_enabled()) {
1089  av_log(avctx, AV_LOG_ERROR, "Error: -q:v qscale not available for encoder. Use -b:v bitrate instead.\n");
1090  return AVERROR_EXTERNAL;
1091  }
1092 
1093  if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
1094  quality = quality >= 100 ? 1.0 : quality / 100;
1095  quality_num = CFNumberCreate(kCFAllocatorDefault,
1096  kCFNumberFloat32Type,
1097  &quality);
1098  if (!quality_num) return AVERROR(ENOMEM);
1099 
1100  status = VTSessionSetProperty(vtctx->session,
1101  kVTCompressionPropertyKey_Quality,
1102  quality_num);
1103  CFRelease(quality_num);
1104  } else {
1105  bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1106  kCFNumberSInt32Type,
1107  &bit_rate);
1108  if (!bit_rate_num) return AVERROR(ENOMEM);
1109 
1110  status = VTSessionSetProperty(vtctx->session,
1111  kVTCompressionPropertyKey_AverageBitRate,
1112  bit_rate_num);
1113  CFRelease(bit_rate_num);
1114  }
1115 
1116  if (status) {
1117  av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1118  return AVERROR_EXTERNAL;
1119  }
1120 
1121  if ((vtctx->codec_id == AV_CODEC_ID_H264 || vtctx->codec_id == AV_CODEC_ID_HEVC)
1122  && max_rate > 0) {
1123  bytes_per_second_value = max_rate >> 3;
1124  bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1125  kCFNumberSInt64Type,
1126  &bytes_per_second_value);
1127  if (!bytes_per_second) {
1128  return AVERROR(ENOMEM);
1129  }
1130  one_second_value = 1;
1131  one_second = CFNumberCreate(kCFAllocatorDefault,
1132  kCFNumberSInt64Type,
1133  &one_second_value);
1134  if (!one_second) {
1135  CFRelease(bytes_per_second);
1136  return AVERROR(ENOMEM);
1137  }
1138  nums[0] = (void *)bytes_per_second;
1139  nums[1] = (void *)one_second;
1140  data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1141  (const void **)nums,
1142  2,
1143  &kCFTypeArrayCallBacks);
1144 
1145  if (!data_rate_limits) {
1146  CFRelease(bytes_per_second);
1147  CFRelease(one_second);
1148  return AVERROR(ENOMEM);
1149  }
1150  status = VTSessionSetProperty(vtctx->session,
1151  kVTCompressionPropertyKey_DataRateLimits,
1152  data_rate_limits);
1153 
1154  CFRelease(bytes_per_second);
1155  CFRelease(one_second);
1156  CFRelease(data_rate_limits);
1157 
1158  if (status) {
1159  av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1160         // kVTCompressionPropertyKey_DataRateLimits is available for HEVC
1161         // on current releases but not on older ones, and there is no record
1162         // of when it was added, so ignore the error if it fails for HEVC.
1163  if (vtctx->codec_id != AV_CODEC_ID_HEVC)
1164  return AVERROR_EXTERNAL;
1165  }
1166  }
1167 
1168  if (vtctx->codec_id == AV_CODEC_ID_HEVC) {
1169  if (avctx->pix_fmt == AV_PIX_FMT_BGRA && vtctx->alpha_quality > 0.0) {
1170  CFNumberRef alpha_quality_num = CFNumberCreate(kCFAllocatorDefault,
1171  kCFNumberDoubleType,
1172  &vtctx->alpha_quality);
1173  if (!alpha_quality_num) return AVERROR(ENOMEM);
1174 
1175  status = VTSessionSetProperty(vtctx->session,
1176  compat_keys.kVTCompressionPropertyKey_TargetQualityForAlpha,
1177  alpha_quality_num);
1178  CFRelease(alpha_quality_num);
1179  }
1180  }
1181 
1182  if (profile_level) {
1183  status = VTSessionSetProperty(vtctx->session,
1184  kVTCompressionPropertyKey_ProfileLevel,
1185  profile_level);
1186  if (status) {
1187  av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
1188  }
1189  }
1190 
1191  if (avctx->gop_size > 0) {
1192  CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1193  kCFNumberIntType,
1194  &avctx->gop_size);
1195  if (!interval) {
1196  return AVERROR(ENOMEM);
1197  }
1198 
1199  status = VTSessionSetProperty(vtctx->session,
1200  kVTCompressionPropertyKey_MaxKeyFrameInterval,
1201  interval);
1202  CFRelease(interval);
1203 
1204  if (status) {
1205  av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1206  return AVERROR_EXTERNAL;
1207  }
1208  }
1209 
1210  if (vtctx->frames_before) {
1211  status = VTSessionSetProperty(vtctx->session,
1212  kVTCompressionPropertyKey_MoreFramesBeforeStart,
1213  kCFBooleanTrue);
1214 
1215  if (status == kVTPropertyNotSupportedErr) {
1216  av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1217  } else if (status) {
1218  av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1219  }
1220  }
1221 
1222  if (vtctx->frames_after) {
1223  status = VTSessionSetProperty(vtctx->session,
1224  kVTCompressionPropertyKey_MoreFramesAfterEnd,
1225  kCFBooleanTrue);
1226 
1227  if (status == kVTPropertyNotSupportedErr) {
1228  av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1229  } else if (status) {
1230  av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1231  }
1232  }
1233 
1234  if (avctx->sample_aspect_ratio.num != 0) {
1235  CFNumberRef num;
1236  CFNumberRef den;
1237  CFMutableDictionaryRef par;
1238  AVRational *avpar = &avctx->sample_aspect_ratio;
1239 
1240  av_reduce(&avpar->num, &avpar->den,
1241  avpar->num, avpar->den,
1242  0xFFFFFFFF);
1243 
1244  num = CFNumberCreate(kCFAllocatorDefault,
1245  kCFNumberIntType,
1246  &avpar->num);
1247 
1248  den = CFNumberCreate(kCFAllocatorDefault,
1249  kCFNumberIntType,
1250  &avpar->den);
1251 
1252 
1253 
1254  par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1255  2,
1256  &kCFCopyStringDictionaryKeyCallBacks,
1257  &kCFTypeDictionaryValueCallBacks);
1258 
1259  if (!par || !num || !den) {
1260  if (par) CFRelease(par);
1261  if (num) CFRelease(num);
1262  if (den) CFRelease(den);
1263 
1264  return AVERROR(ENOMEM);
1265  }
1266 
1267  CFDictionarySetValue(
1268  par,
1269  kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1270  num);
1271 
1272  CFDictionarySetValue(
1273  par,
1274  kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1275  den);
1276 
1277  status = VTSessionSetProperty(vtctx->session,
1278  kVTCompressionPropertyKey_PixelAspectRatio,
1279  par);
1280 
1281  CFRelease(par);
1282  CFRelease(num);
1283  CFRelease(den);
1284 
1285  if (status) {
1286  av_log(avctx,
1287  AV_LOG_ERROR,
1288  "Error setting pixel aspect ratio to %d:%d: %d.\n",
1289  avctx->sample_aspect_ratio.num,
1290  avctx->sample_aspect_ratio.den,
1291  status);
1292 
1293  return AVERROR_EXTERNAL;
1294  }
1295  }
1296 
1297 
1298  if (vtctx->transfer_function) {
1299  status = VTSessionSetProperty(vtctx->session,
1300  kVTCompressionPropertyKey_TransferFunction,
1301  vtctx->transfer_function);
1302 
1303  if (status) {
1304  av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1305  }
1306  }
1307 
1308 
1309  if (vtctx->ycbcr_matrix) {
1310  status = VTSessionSetProperty(vtctx->session,
1311  kVTCompressionPropertyKey_YCbCrMatrix,
1312  vtctx->ycbcr_matrix);
1313 
1314  if (status) {
1315  av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1316  }
1317  }
1318 
1319 
1320  if (vtctx->color_primaries) {
1321  status = VTSessionSetProperty(vtctx->session,
1322  kVTCompressionPropertyKey_ColorPrimaries,
1323  vtctx->color_primaries);
1324 
1325  if (status) {
1326  av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1327  }
1328  }
1329 
1330  if (gamma_level) {
1331  status = VTSessionSetProperty(vtctx->session,
1332  kCVImageBufferGammaLevelKey,
1333  gamma_level);
1334 
1335  if (status) {
1336  av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1337  }
1338  }
1339 
1340  if (!vtctx->has_b_frames) {
1341  status = VTSessionSetProperty(vtctx->session,
1342  kVTCompressionPropertyKey_AllowFrameReordering,
1343  kCFBooleanFalse);
1344 
1345  if (status) {
1346  av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1347  return AVERROR_EXTERNAL;
1348  }
1349  }
1350 
1351  if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1352  CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1353  compat_keys.kVTH264EntropyMode_CABAC:
1354  compat_keys.kVTH264EntropyMode_CAVLC;
1355 
1356  status = VTSessionSetProperty(vtctx->session,
1357  compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1358  entropy);
1359 
1360  if (status) {
1361  av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1362  }
1363  }
1364 
1365  if (vtctx->realtime) {
1366  status = VTSessionSetProperty(vtctx->session,
1367  compat_keys.kVTCompressionPropertyKey_RealTime,
1368  kCFBooleanTrue);
1369 
1370  if (status) {
1371  av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1372  }
1373  }
1374 
1375  status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1376  if (status) {
1377  av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1378  return AVERROR_EXTERNAL;
1379  }
1380 
1381  return 0;
1382 }
1383 
1384 static int vtenc_configure_encoder(AVCodecContext *avctx)
1385 {
1386  CFMutableDictionaryRef enc_info;
1387  CFMutableDictionaryRef pixel_buffer_info;
1388  CMVideoCodecType codec_type;
1389  VTEncContext *vtctx = avctx->priv_data;
1390  CFStringRef profile_level;
1391  CFNumberRef gamma_level = NULL;
1392  int status;
1393 
1394  codec_type = get_cm_codec_type(avctx->codec_id, avctx->pix_fmt, vtctx->alpha_quality);
1395  if (!codec_type) {
1396  av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1397  return AVERROR(EINVAL);
1398  }
1399 
1400  vtctx->codec_id = avctx->codec_id;
1401 
1402  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1403  vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1404 
1405  vtctx->has_b_frames = avctx->max_b_frames > 0;
1406  if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1407  av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1408  vtctx->has_b_frames = 0;
1409  }
1410 
1411  if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1412  av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1413  vtctx->entropy = VT_ENTROPY_NOT_SET;
1414  }
1415 
1416  if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1417  } else {
1418  vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1419  if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1420  if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1421         // HEVC has b-pyramid
1422  vtctx->has_b_frames = avctx->max_b_frames > 0 ? 2 : 0;
1423  }
1424 
1425  enc_info = CFDictionaryCreateMutable(
1426  kCFAllocatorDefault,
1427  20,
1428  &kCFCopyStringDictionaryKeyCallBacks,
1429  &kCFTypeDictionaryValueCallBacks
1430  );
1431 
1432  if (!enc_info) return AVERROR(ENOMEM);
1433 
1434 #if !TARGET_OS_IPHONE
1435  if(vtctx->require_sw) {
1436  CFDictionarySetValue(enc_info,
1437  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1438  kCFBooleanFalse);
1439  } else if (!vtctx->allow_sw) {
1440  CFDictionarySetValue(enc_info,
1441  compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1442  kCFBooleanTrue);
1443  } else {
1444  CFDictionarySetValue(enc_info,
1445  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1446  kCFBooleanTrue);
1447  }
1448 #endif
1449 
1450  if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1451  status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1452  if (status)
1453  goto init_cleanup;
1454  } else {
1455  pixel_buffer_info = NULL;
1456  }
1457 
1458  vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1459 
1460  get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1461  get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1462  get_cv_color_primaries(avctx, &vtctx->color_primaries);
1463 
1464 
1465  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1466         status = vtenc_populate_extradata(avctx,
1467                                           codec_type,
1468  profile_level,
1469  gamma_level,
1470  enc_info,
1471  pixel_buffer_info);
1472  if (status)
1473  goto init_cleanup;
1474  }
1475 
1476  status = vtenc_create_encoder(avctx,
1477  codec_type,
1478  profile_level,
1479  gamma_level,
1480  enc_info,
1481  pixel_buffer_info,
1482  &vtctx->session);
1483 
1484 init_cleanup:
1485  if (gamma_level)
1486  CFRelease(gamma_level);
1487 
1488  if (pixel_buffer_info)
1489  CFRelease(pixel_buffer_info);
1490 
1491  CFRelease(enc_info);
1492 
1493  return status;
1494 }
1495 
1496 static av_cold int vtenc_init(AVCodecContext *avctx)
1497 {
1498  VTEncContext *vtctx = avctx->priv_data;
1499  CFBooleanRef has_b_frames_cfbool;
1500  int status;
1501 
1502     pthread_once(&once_ctrl, loadVTEncSymbols);
1503 
1504  pthread_mutex_init(&vtctx->lock, NULL);
1505     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1506 
1507  vtctx->session = NULL;
1508     status = vtenc_configure_encoder(avctx);
1509     if (status) return status;
1510 
1511  status = VTSessionCopyProperty(vtctx->session,
1512  kVTCompressionPropertyKey_AllowFrameReordering,
1513  kCFAllocatorDefault,
1514  &has_b_frames_cfbool);
1515 
1516  if (!status && has_b_frames_cfbool) {
1517  //Some devices don't output B-frames for main profile, even if requested.
1518  // HEVC has b-pyramid
1519  vtctx->has_b_frames = (CFBooleanGetValue(has_b_frames_cfbool) && avctx->codec_id == AV_CODEC_ID_HEVC) ? 2 : 1;
1520  CFRelease(has_b_frames_cfbool);
1521  }
1522  avctx->has_b_frames = vtctx->has_b_frames;
1523 
1524  return 0;
1525 }
1526 
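/* A sample is treated as a key frame unless its attachments explicitly mark
 * it as not-sync (kCMSampleAttachmentKey_NotSync). */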
1527 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1528 {
1529  CFArrayRef attachments;
1530  CFDictionaryRef attachment;
1531  CFBooleanRef not_sync;
1532  CFIndex len;
1533 
1534  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1535  len = !attachments ? 0 : CFArrayGetCount(attachments);
1536 
1537  if (!len) {
1538  *is_key_frame = true;
1539  return;
1540  }
1541 
1542  attachment = CFArrayGetValueAtIndex(attachments, 0);
1543 
1544  if (CFDictionaryGetValueIfPresent(attachment,
1545  kCMSampleAttachmentKey_NotSync,
1546  (const void **)&not_sync))
1547  {
1548  *is_key_frame = !CFBooleanGetValue(not_sync);
1549  } else {
1550  *is_key_frame = true;
1551  }
1552 }
1553 
1554 static int is_post_sei_nal_type(int nal_type){
1555  return nal_type != H264_NAL_SEI &&
1556  nal_type != H264_NAL_SPS &&
1557  nal_type != H264_NAL_PPS &&
1558  nal_type != H264_NAL_AUD;
1559 }
1560 
1561 /*
1562  * Finds the end of the SEI NAL unit so additional SEI messages can be
1563  * appended; sets *sei_end and returns the byte count parsed (0 if not SEI).
1564  */
1565 static int find_sei_end(AVCodecContext *avctx,
1566  uint8_t *nal_data,
1567  size_t nal_size,
1568  uint8_t **sei_end)
1569 {
1570  int nal_type;
1571  size_t sei_payload_size = 0;
1572  int sei_payload_type = 0;
1573  *sei_end = NULL;
1574  uint8_t *nal_start = nal_data;
1575 
1576  if (!nal_size)
1577  return 0;
1578 
1579  nal_type = *nal_data & 0x1F;
1580  if (nal_type != H264_NAL_SEI)
1581  return 0;
1582 
1583  nal_data++;
1584  nal_size--;
1585 
1586  if (nal_data[nal_size - 1] == 0x80)
1587  nal_size--;
1588 
1589  while (nal_size > 0 && *nal_data > 0) {
1590  do{
1591  sei_payload_type += *nal_data;
1592  nal_data++;
1593  nal_size--;
1594  } while (nal_size > 0 && *nal_data == 0xFF);
1595 
1596  if (!nal_size) {
1597  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1598  return AVERROR_INVALIDDATA;
1599  }
1600 
1601  do{
1602  sei_payload_size += *nal_data;
1603  nal_data++;
1604  nal_size--;
1605  } while (nal_size > 0 && *nal_data == 0xFF);
1606 
1607  if (nal_size < sei_payload_size) {
1608  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1609  return AVERROR_INVALIDDATA;
1610  }
1611 
1612  nal_data += sei_payload_size;
1613  nal_size -= sei_payload_size;
1614  }
1615 
1616  *sei_end = nal_data;
1617 
1618  return nal_data - nal_start + 1;
1619 }
1620 
1621 /**
1622  * Copies the data inserting emulation prevention bytes as needed.
1623  * Existing data in the destination can be taken into account by providing
1624  * dst with a dst_offset > 0.
1625  *
1626  * @return The number of bytes copied on success. On failure, the negative of
1627  * the number of bytes needed to copy src is returned.
1628  */
1629 static int copy_emulation_prev(const uint8_t *src,
1630  size_t src_size,
1631  uint8_t *dst,
1632  ssize_t dst_offset,
1633  size_t dst_size)
1634 {
1635  int zeros = 0;
1636  int wrote_bytes;
1637  uint8_t* dst_start;
1638  uint8_t* dst_end = dst + dst_size;
1639  const uint8_t* src_end = src + src_size;
1640  int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1641  int i;
1642  for (i = start_at; i < dst_offset && i < dst_size; i++) {
1643  if (!dst[i])
1644  zeros++;
1645  else
1646  zeros = 0;
1647  }
1648 
1649  dst += dst_offset;
1650  dst_start = dst;
1651  for (; src < src_end; src++, dst++) {
1652  if (zeros == 2) {
1653  int insert_ep3_byte = *src <= 3;
1654  if (insert_ep3_byte) {
1655  if (dst < dst_end)
1656  *dst = 3;
1657  dst++;
1658  }
1659 
1660  zeros = 0;
1661  }
1662 
1663  if (dst < dst_end)
1664  *dst = *src;
1665 
1666  if (!*src)
1667  zeros++;
1668  else
1669  zeros = 0;
1670  }
1671 
1672  wrote_bytes = dst - dst_start;
1673 
1674  if (dst > dst_end)
1675  return -wrote_bytes;
1676 
1677  return wrote_bytes;
1678 }
1679 
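/* Write a single SEI message: the payload type and size are emitted as
 * 255-valued chunks, then the payload is copied with emulation prevention
 * bytes inserted via copy_emulation_prev(). */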
1680 static int write_sei(const ExtraSEI *sei,
1681  int sei_type,
1682  uint8_t *dst,
1683  size_t dst_size)
1684 {
1685  uint8_t *sei_start = dst;
1686  size_t remaining_sei_size = sei->size;
1687  size_t remaining_dst_size = dst_size;
1688  int header_bytes;
1689  int bytes_written;
1690  ssize_t offset;
1691 
1692  if (!remaining_dst_size)
1693  return AVERROR_BUFFER_TOO_SMALL;
1694 
1695  while (sei_type && remaining_dst_size != 0) {
1696  int sei_byte = sei_type > 255 ? 255 : sei_type;
1697  *dst = sei_byte;
1698 
1699  sei_type -= sei_byte;
1700  dst++;
1701  remaining_dst_size--;
1702  }
1703 
1704  if (!dst_size)
1705  return AVERROR_BUFFER_TOO_SMALL;
1706 
1707  while (remaining_sei_size && remaining_dst_size != 0) {
1708  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1709  *dst = size_byte;
1710 
1711  remaining_sei_size -= size_byte;
1712  dst++;
1713  remaining_dst_size--;
1714  }
1715 
1716  if (remaining_dst_size < sei->size)
1717  return AVERROR_BUFFER_TOO_SMALL;
1718 
1719  header_bytes = dst - sei_start;
1720 
1721  offset = header_bytes;
1722  bytes_written = copy_emulation_prev(sei->data,
1723  sei->size,
1724  sei_start,
1725  offset,
1726  dst_size);
1727  if (bytes_written < 0)
1728  return AVERROR_BUFFER_TOO_SMALL;
1729 
1730  bytes_written += header_bytes;
1731  return bytes_written;
1732 }
1733 
1734 /**
1735  * Copies NAL units and replaces length codes with
1736  * H.264 Annex B start codes. On failure, the contents of
1737  * dst_data may have been modified.
1738  *
1739  * @param length_code_size Byte length of each length code
1740  * @param sample_buffer NAL units prefixed with length codes.
1741  * @param sei Optional A53 closed captions SEI data.
1742  * @param dst_data Must be zeroed before calling this function.
1743  * Contains the copied NAL units prefixed with
1744  * start codes when the function returns
1745  * successfully.
1746  * @param dst_size Length of dst_data
1747  * @return 0 on success
1748  * AVERROR_INVALIDDATA if length_code_size is invalid
1749  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1750  * or if a length_code in src_data specifies data beyond
1751  * the end of its buffer.
1752  */
1753 static int copy_replace_length_codes(
1754     AVCodecContext *avctx,
1755  size_t length_code_size,
1756  CMSampleBufferRef sample_buffer,
1757  ExtraSEI *sei,
1758  uint8_t *dst_data,
1759  size_t dst_size)
1760 {
1761  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1762  size_t remaining_src_size = src_size;
1763  size_t remaining_dst_size = dst_size;
1764  size_t src_offset = 0;
1765  int wrote_sei = 0;
1766  int status;
1767  uint8_t size_buf[4];
1768  uint8_t nal_type;
1769  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1770 
1771  if (length_code_size > 4) {
1772  return AVERROR_INVALIDDATA;
1773  }
1774 
1775  while (remaining_src_size > 0) {
1776  size_t curr_src_len;
1777  size_t curr_dst_len;
1778  size_t box_len = 0;
1779  size_t i;
1780 
1781  uint8_t *dst_box;
1782 
1783  status = CMBlockBufferCopyDataBytes(block,
1784  src_offset,
1785  length_code_size,
1786  size_buf);
1787  if (status) {
1788  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1789  return AVERROR_EXTERNAL;
1790  }
1791 
1792  status = CMBlockBufferCopyDataBytes(block,
1793  src_offset + length_code_size,
1794  1,
1795  &nal_type);
1796 
1797  if (status) {
1798  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1799  return AVERROR_EXTERNAL;
1800  }
1801 
1802  nal_type &= 0x1F;
1803 
1804  for (i = 0; i < length_code_size; i++) {
1805  box_len <<= 8;
1806  box_len |= size_buf[i];
1807  }
1808 
1809  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1810  //No SEI NAL unit - insert.
1811  int wrote_bytes;
1812 
1813  memcpy(dst_data, start_code, sizeof(start_code));
1814  dst_data += sizeof(start_code);
1815  remaining_dst_size -= sizeof(start_code);
1816 
1817  *dst_data = H264_NAL_SEI;
1818  dst_data++;
1819  remaining_dst_size--;
1820 
1821  wrote_bytes = write_sei(sei,
1822                                     SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
1823                                     dst_data,
1824  remaining_dst_size);
1825 
1826  if (wrote_bytes < 0)
1827  return wrote_bytes;
1828 
1829  remaining_dst_size -= wrote_bytes;
1830  dst_data += wrote_bytes;
1831 
1832  if (remaining_dst_size <= 0)
1833  return AVERROR_BUFFER_TOO_SMALL;
1834 
1835  *dst_data = 0x80;
1836 
1837  dst_data++;
1838  remaining_dst_size--;
1839 
1840  wrote_sei = 1;
1841  }
1842 
1843  curr_src_len = box_len + length_code_size;
1844  curr_dst_len = box_len + sizeof(start_code);
1845 
1846  if (remaining_src_size < curr_src_len) {
1847  return AVERROR_BUFFER_TOO_SMALL;
1848  }
1849 
1850  if (remaining_dst_size < curr_dst_len) {
1851  return AVERROR_BUFFER_TOO_SMALL;
1852  }
1853 
1854  dst_box = dst_data + sizeof(start_code);
1855 
1856  memcpy(dst_data, start_code, sizeof(start_code));
1857  status = CMBlockBufferCopyDataBytes(block,
1858  src_offset + length_code_size,
1859  box_len,
1860  dst_box);
1861 
1862  if (status) {
1863  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1864  return AVERROR_EXTERNAL;
1865  }
1866 
1867  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1868  //Found SEI NAL unit - append.
1869  int wrote_bytes;
1870  int old_sei_length;
1871  int extra_bytes;
1872  uint8_t *new_sei;
1873  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1874  if (old_sei_length < 0)
1875  return status;
1876 
1877  wrote_bytes = write_sei(sei,
1878                                     SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
1879                                     new_sei,
1880  remaining_dst_size - old_sei_length);
1881  if (wrote_bytes < 0)
1882  return wrote_bytes;
1883 
1884  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1885  return AVERROR_BUFFER_TOO_SMALL;
1886 
1887  new_sei[wrote_bytes++] = 0x80;
1888  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1889 
1890  dst_data += extra_bytes;
1891  remaining_dst_size -= extra_bytes;
1892 
1893  wrote_sei = 1;
1894  }
1895 
1896  src_offset += curr_src_len;
1897  dst_data += curr_dst_len;
1898 
1899  remaining_src_size -= curr_src_len;
1900  remaining_dst_size -= curr_dst_len;
1901  }
1902 
1903  return 0;
1904 }
1905 
1906 /**
1907  * Returns a sufficient number of bytes to contain the sei data.
1908  * It may be greater than the minimum required.
1909  */
1910 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1911  int copied_size;
1912  if (sei->size == 0)
1913  return 0;
1914 
1915  copied_size = -copy_emulation_prev(sei->data,
1916  sei->size,
1917  NULL,
1918  0,
1919  0);
1920 
1921  if ((sei->size % 255) == 0) //may result in an extra byte
1922  copied_size++;
1923 
1924  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1925 }
1926 
1927 static int vtenc_cm_to_avpacket(
1928     AVCodecContext    *avctx,
1929  CMSampleBufferRef sample_buffer,
1930  AVPacket *pkt,
1931  ExtraSEI *sei)
1932 {
1933  VTEncContext *vtctx = avctx->priv_data;
1934 
1935  int status;
1936  bool is_key_frame;
1937  bool add_header;
1938  size_t length_code_size;
1939  size_t header_size = 0;
1940  size_t in_buf_size;
1941  size_t out_buf_size;
1942  size_t sei_nalu_size = 0;
1943  int64_t dts_delta;
1944  int64_t time_base_num;
1945  int nalu_count;
1946  CMTime pts;
1947  CMTime dts;
1948  CMVideoFormatDescriptionRef vid_fmt;
1949 
1950 
1951  vtenc_get_frame_info(sample_buffer, &is_key_frame);
1952  status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1953  if (status) return status;
1954 
1955  add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1956 
1957  if (add_header) {
1958  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1959  if (!vid_fmt) {
1960  av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1961  return AVERROR_EXTERNAL;
1962  }
1963 
1964  int status = get_params_size(avctx, vid_fmt, &header_size);
1965  if (status) return status;
1966  }
1967 
1968  status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1969  if(status)
1970  return status;
1971 
1972  if (sei) {
1973  size_t msg_size = get_sei_msg_bytes(sei,
1974                                         SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35);
1975 
1976  sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1977  }
1978 
1979  in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1980  out_buf_size = header_size +
1981  in_buf_size +
1982  sei_nalu_size +
1983  nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1984 
1985  status = ff_get_encode_buffer(avctx, pkt, out_buf_size, 0);
1986  if (status < 0)
1987  return status;
1988 
1989  if (add_header) {
1990  status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1991  if(status) return status;
1992  }
1993 
1994     status = copy_replace_length_codes(
1995         avctx,
1996  length_code_size,
1997  sample_buffer,
1998  sei,
1999  pkt->data + header_size,
2000  pkt->size - header_size
2001  );
2002 
2003  if (status) {
2004  av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
2005  return status;
2006  }
2007 
2008  if (is_key_frame) {
2009         pkt->flags |= AV_PKT_FLAG_KEY;
2010     }
2011 
2012  pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
2013  dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
2014 
2015  if (CMTIME_IS_INVALID(dts)) {
2016  if (!vtctx->has_b_frames) {
2017  dts = pts;
2018  } else {
2019  av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
2020  return AVERROR_EXTERNAL;
2021  }
2022  }
2023 
2024  dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
2025  time_base_num = avctx->time_base.num;
2026  pkt->pts = pts.value / time_base_num;
2027  pkt->dts = dts.value / time_base_num - dts_delta;
2028 
2029  return 0;
2030 }
2031 
2032 /*
2033  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
2034  * containing all planes if so.
2035  */
2036 static int get_cv_pixel_info(
2037     AVCodecContext *avctx,
2038  const AVFrame *frame,
2039  int *color,
2040  int *plane_count,
2041  size_t *widths,
2042  size_t *heights,
2043  size_t *strides,
2044  size_t *contiguous_buf_size)
2045 {
2046  VTEncContext *vtctx = avctx->priv_data;
2047  int av_format = frame->format;
2048  int av_color_range = frame->color_range;
2049  int i;
2050  int range_guessed;
2051  int status;
2052 
2053  status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
2054  if (status) {
2055  av_log(avctx,
2056  AV_LOG_ERROR,
2057  "Could not get pixel format for color format '%s' range '%s'.\n",
2058  av_get_pix_fmt_name(av_format),
2059  av_color_range > AVCOL_RANGE_UNSPECIFIED &&
2060  av_color_range < AVCOL_RANGE_NB ?
2061  av_color_range_name(av_color_range) :
2062  "Unknown");
2063 
2064  return AVERROR(EINVAL);
2065  }
2066 
2067  if (range_guessed) {
2068  if (!vtctx->warned_color_range) {
2069  vtctx->warned_color_range = true;
2070  av_log(avctx,
2071                    AV_LOG_WARNING,
2072                    "Color range not set for %s. Using MPEG range.\n",
2073  av_get_pix_fmt_name(av_format));
2074  }
2075  }
2076 
2077  switch (av_format) {
2078  case AV_PIX_FMT_NV12:
2079  *plane_count = 2;
2080 
2081  widths [0] = avctx->width;
2082  heights[0] = avctx->height;
2083  strides[0] = frame ? frame->linesize[0] : avctx->width;
2084 
2085  widths [1] = (avctx->width + 1) / 2;
2086  heights[1] = (avctx->height + 1) / 2;
2087  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2088  break;
2089 
2090  case AV_PIX_FMT_YUV420P:
2091  *plane_count = 3;
2092 
2093  widths [0] = avctx->width;
2094  heights[0] = avctx->height;
2095  strides[0] = frame ? frame->linesize[0] : avctx->width;
2096 
2097  widths [1] = (avctx->width + 1) / 2;
2098  heights[1] = (avctx->height + 1) / 2;
2099  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2100 
2101  widths [2] = (avctx->width + 1) / 2;
2102  heights[2] = (avctx->height + 1) / 2;
2103  strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2104  break;
2105 
2106  case AV_PIX_FMT_BGRA:
2107  *plane_count = 1;
2108 
2109  widths [0] = avctx->width;
2110  heights[0] = avctx->height;
2111  strides[0] = frame ? frame->linesize[0] : avctx->width * 4;
2112  break;
2113 
2114  case AV_PIX_FMT_P010LE:
2115  *plane_count = 2;
2116  widths[0] = avctx->width;
2117  heights[0] = avctx->height;
2118  strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2119 
2120  widths[1] = (avctx->width + 1) / 2;
2121  heights[1] = (avctx->height + 1) / 2;
2122  strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2123  break;
2124 
2125  default:
2126  av_log(
2127  avctx,
2128  AV_LOG_ERROR,
2129  "Could not get frame format info for color %d range %d.\n",
2130  av_format,
2131  av_color_range);
2132 
2133  return AVERROR(EINVAL);
2134  }
2135 
2136  *contiguous_buf_size = 0;
2137  for (i = 0; i < *plane_count; i++) {
2138  if (i < *plane_count - 1 &&
2139  frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2140  *contiguous_buf_size = 0;
2141  break;
2142  }
2143 
2144  *contiguous_buf_size += strides[i] * heights[i];
2145  }
2146 
2147  return 0;
2148 }
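/*
 * Illustrative note (not part of the original file): chroma planes are rounded
 * up with (dim + 1) / 2 so odd dimensions still get a full chroma row/column.
 * For NV12 at 1920x1080 with tightly packed planes the values computed above
 * would be:
 *
 *     plane 0 (Y):  1920 x 1080, stride 1920  ->  2073600 bytes
 *     plane 1 (UV):  960 x  540, stride 1920  ->  1036800 bytes
 *
 * If frame->data[1] == frame->data[0] + 2073600 the planes are contiguous and
 * *contiguous_buf_size ends up as 3110400; any gap between planes resets it to
 * 0, signalling the caller to copy plane by plane instead.
 */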
2149 
2150 //Not used on OSX - frame is never copied.
 2151 static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
 2152  const AVFrame *frame,
2153  CVPixelBufferRef cv_img,
2154  const size_t *plane_strides,
2155  const size_t *plane_rows)
2156 {
2157  int i, j;
2158  size_t plane_count;
2159  int status;
2160  int rows;
2161  int src_stride;
2162  int dst_stride;
2163  uint8_t *src_addr;
2164  uint8_t *dst_addr;
2165  size_t copy_bytes;
2166 
2167  status = CVPixelBufferLockBaseAddress(cv_img, 0);
2168  if (status) {
2169  av_log(
2170  avctx,
2171  AV_LOG_ERROR,
2172  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2173  status
2174  );
2175  }
2176 
2177  if (CVPixelBufferIsPlanar(cv_img)) {
2178  plane_count = CVPixelBufferGetPlaneCount(cv_img);
2179  for (i = 0; frame->data[i]; i++) {
2180  if (i == plane_count) {
2181  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2182  av_log(avctx,
2183  AV_LOG_ERROR,
2184  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2185  );
2186 
2187  return AVERROR_EXTERNAL;
2188  }
2189 
2190  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2191  src_addr = (uint8_t*)frame->data[i];
2192  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2193  src_stride = plane_strides[i];
2194  rows = plane_rows[i];
2195 
2196  if (dst_stride == src_stride) {
2197  memcpy(dst_addr, src_addr, src_stride * rows);
2198  } else {
2199  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2200 
2201  for (j = 0; j < rows; j++) {
2202  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2203  }
2204  }
2205  }
2206  } else {
2207  if (frame->data[1]) {
2208  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2209  av_log(avctx,
2210  AV_LOG_ERROR,
2211  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2212  );
2213 
2214  return AVERROR_EXTERNAL;
2215  }
2216 
2217  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2218  src_addr = (uint8_t*)frame->data[0];
2219  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2220  src_stride = plane_strides[0];
2221  rows = plane_rows[0];
2222 
2223  if (dst_stride == src_stride) {
2224  memcpy(dst_addr, src_addr, src_stride * rows);
2225  } else {
2226  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2227 
2228  for (j = 0; j < rows; j++) {
2229  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2230  }
2231  }
2232  }
2233 
2234  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2235  if (status) {
2236  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2237  return AVERROR_EXTERNAL;
2238  }
2239 
2240  return 0;
2241 }
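/*
 * Illustrative sketch (not part of the original file): the function above is
 * the usual CVPixelBuffer pattern of lock base address, copy each plane, then
 * unlock.  When source and destination strides differ only
 * min(src_stride, dst_stride) bytes per row are meaningful, which the
 * hypothetical helper below reproduces in isolation.
 */
#if 0 /* standalone sketch; helper name is hypothetical */
static void copy_plane_rows(uint8_t *dst, size_t dst_stride,
                            const uint8_t *src, size_t src_stride,
                            size_t rows)
{
    size_t copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
    for (size_t j = 0; j < rows; j++)
        memcpy(dst + j * dst_stride, src + j * src_stride, copy_bytes);
}
#endif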
2242 
 2243 static int create_cv_pixel_buffer(AVCodecContext *avctx,
 2244  const AVFrame *frame,
2245  CVPixelBufferRef *cv_img)
2246 {
2247  int plane_count;
2248  int color;
2249  size_t widths [AV_NUM_DATA_POINTERS];
2250  size_t heights[AV_NUM_DATA_POINTERS];
2251  size_t strides[AV_NUM_DATA_POINTERS];
2252  int status;
2253  size_t contiguous_buf_size;
2254  CVPixelBufferPoolRef pix_buf_pool;
2255  VTEncContext* vtctx = avctx->priv_data;
2256 
2257  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
 2258  av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
 2259 
2260  *cv_img = (CVPixelBufferRef)frame->data[3];
2261  av_assert0(*cv_img);
2262 
2263  CFRetain(*cv_img);
2264  return 0;
2265  }
2266 
2267  memset(widths, 0, sizeof(widths));
2268  memset(heights, 0, sizeof(heights));
2269  memset(strides, 0, sizeof(strides));
2270 
 2271  status = get_cv_pixel_info(
 2272  avctx,
2273  frame,
2274  &color,
2275  &plane_count,
2276  widths,
2277  heights,
2278  strides,
2279  &contiguous_buf_size
2280  );
2281 
2282  if (status) {
2283  av_log(
2284  avctx,
2285  AV_LOG_ERROR,
2286  "Error: Cannot convert format %d color_range %d: %d\n",
2287  frame->format,
2288  frame->color_range,
2289  status
2290  );
2291 
2292  return AVERROR_EXTERNAL;
2293  }
2294 
2295  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2296  if (!pix_buf_pool) {
2297  /* On iOS, the VT session is invalidated when the APP switches from
2298  * foreground to background and vice versa. Fetch the actual error code
2299  * of the VT session to detect that case and restart the VT session
2300  * accordingly. */
2301  OSStatus vtstatus;
2302 
2303  vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2304  if (vtstatus == kVTInvalidSessionErr) {
2305  CFRelease(vtctx->session);
2306  vtctx->session = NULL;
 2307  status = vtenc_configure_encoder(avctx);
 2308  if (status == 0)
2309  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2310  }
2311  if (!pix_buf_pool) {
2312  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2313  return AVERROR_EXTERNAL;
2314  }
2315  else
2316  av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2317  "kVTInvalidSessionErr error.\n");
2318  }
2319 
2320  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2321  pix_buf_pool,
2322  cv_img);
2323 
2324 
2325  if (status) {
2326  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2327  return AVERROR_EXTERNAL;
2328  }
2329 
2330  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2331  if (status) {
2332  CFRelease(*cv_img);
2333  *cv_img = NULL;
2334  return status;
2335  }
2336 
2337  return 0;
2338 }
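/*
 * Illustrative sketch (not part of the original file): a NULL pixel buffer
 * pool is treated as a symptom of an invalidated session (on iOS the session
 * dies when the app moves to the background).  VTCompressionSessionPrepareToEncodeFrames()
 * is used to surface the real error code; on kVTInvalidSessionErr the session
 * is released and rebuilt via vtenc_configure_encoder().  Condensed into a
 * hypothetical helper:
 */
#if 0
static int vt_session_recovered(AVCodecContext *avctx, VTEncContext *vtctx)
{
    if (VTCompressionSessionPrepareToEncodeFrames(vtctx->session) != kVTInvalidSessionErr)
        return 0;                                   /* some other failure; give up */
    CFRelease(vtctx->session);
    vtctx->session = NULL;
    return vtenc_configure_encoder(avctx) == 0;     /* rebuild with the same settings */
}
#endif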
2339 
 2340 static int create_encoder_dict_h264(const AVFrame *frame,
 2341  CFDictionaryRef* dict_out)
2342 {
2343  CFDictionaryRef dict = NULL;
2344  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2345  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2346  const void *vals[] = { kCFBooleanTrue };
2347 
2348  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2349  if(!dict) return AVERROR(ENOMEM);
2350  }
2351 
2352  *dict_out = dict;
2353  return 0;
2354 }
2355 
 2356 static int vtenc_send_frame(AVCodecContext *avctx,
 2357  VTEncContext *vtctx,
2358  const AVFrame *frame)
2359 {
2360  CMTime time;
2361  CFDictionaryRef frame_dict;
2362  CVPixelBufferRef cv_img = NULL;
2363  AVFrameSideData *side_data = NULL;
2364  ExtraSEI *sei = NULL;
2365  int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2366 
2367  if (status) return status;
2368 
2369  status = create_encoder_dict_h264(frame, &frame_dict);
2370  if (status) {
2371  CFRelease(cv_img);
2372  return status;
2373  }
2374 
 2375  side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
 2376  if (vtctx->a53_cc && side_data && side_data->size) {
2377  sei = av_mallocz(sizeof(*sei));
2378  if (!sei) {
2379  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2380  } else {
2381  int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2382  if (ret < 0) {
2383  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2384  av_free(sei);
2385  sei = NULL;
2386  }
2387  }
2388  }
2389 
2390  time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2391  status = VTCompressionSessionEncodeFrame(
2392  vtctx->session,
2393  cv_img,
2394  time,
2395  kCMTimeInvalid,
2396  frame_dict,
2397  sei,
2398  NULL
2399  );
2400 
2401  if (frame_dict) CFRelease(frame_dict);
2402  CFRelease(cv_img);
2403 
2404  if (status) {
2405  av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2406  return AVERROR_EXTERNAL;
2407  }
2408 
2409  return 0;
2410 }
2411 
 2412 static av_cold int vtenc_frame(
 2413  AVCodecContext *avctx,
2414  AVPacket *pkt,
2415  const AVFrame *frame,
2416  int *got_packet)
2417 {
2418  VTEncContext *vtctx = avctx->priv_data;
2419  bool get_frame;
2420  int status;
2421  CMSampleBufferRef buf = NULL;
2422  ExtraSEI *sei = NULL;
2423 
2424  if (frame) {
2425  status = vtenc_send_frame(avctx, vtctx, frame);
2426 
 2427  if (status) {
 2428  status = AVERROR_EXTERNAL;
 2429  goto end_nopkt;
2430  }
2431 
2432  if (vtctx->frame_ct_in == 0) {
2433  vtctx->first_pts = frame->pts;
2434  } else if(vtctx->frame_ct_in == vtctx->has_b_frames) {
2435  vtctx->dts_delta = frame->pts - vtctx->first_pts;
2436  }
2437 
2438  vtctx->frame_ct_in++;
2439  } else if(!vtctx->flushing) {
2440  vtctx->flushing = true;
2441 
2442  status = VTCompressionSessionCompleteFrames(vtctx->session,
2443  kCMTimeIndefinite);
2444 
2445  if (status) {
 2446  av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
 2447  status = AVERROR_EXTERNAL;
 2448  goto end_nopkt;
2449  }
2450  }
2451 
2452  *got_packet = 0;
2453  get_frame = vtctx->dts_delta >= 0 || !frame;
2454  if (!get_frame) {
2455  status = 0;
2456  goto end_nopkt;
2457  }
2458 
2459  status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2460  if (status) goto end_nopkt;
2461  if (!buf) goto end_nopkt;
2462 
2463  status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2464  if (sei) {
2465  if (sei->data) av_free(sei->data);
2466  av_free(sei);
2467  }
2468  CFRelease(buf);
2469  if (status) goto end_nopkt;
2470 
2471  *got_packet = 1;
2472  return 0;
2473 
 2474 end_nopkt:
 2475  av_packet_unref(pkt);
 2476  return status;
2477 }
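/*
 * Illustrative note (not part of the original file): with B-frames the first
 * packets are withheld until dts_delta is known, so dts can later be shifted
 * below pts.  With hypothetical input pts 0, 1, 2, ... and has_b_frames == 1:
 *
 *     frame_ct_in 0: first_pts = 0, dts_delta still unset  -> no packet yet
 *     frame_ct_in 1: dts_delta = 1 - 0 = 1                 -> queue drains
 *     thereafter each packet's dts is lowered by dts_delta in
 *     vtenc_cm_to_avpacket(), keeping dts <= pts
 */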
2478 
 2479 static int vtenc_populate_extradata(AVCodecContext *avctx,
 2480  CMVideoCodecType codec_type,
2481  CFStringRef profile_level,
2482  CFNumberRef gamma_level,
2483  CFDictionaryRef enc_info,
2484  CFDictionaryRef pixel_buffer_info)
2485 {
2486  VTEncContext *vtctx = avctx->priv_data;
2487  int status;
2488  CVPixelBufferPoolRef pool = NULL;
2489  CVPixelBufferRef pix_buf = NULL;
2490  CMTime time;
2491  CMSampleBufferRef buf = NULL;
2492 
2493  status = vtenc_create_encoder(avctx,
2494  codec_type,
2495  profile_level,
2496  gamma_level,
2497  enc_info,
2498  pixel_buffer_info,
2499  &vtctx->session);
2500  if (status)
2501  goto pe_cleanup;
2502 
2503  pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2504  if(!pool){
2505  av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2506  goto pe_cleanup;
2507  }
2508 
2509  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2510  pool,
2511  &pix_buf);
2512 
2513  if(status != kCVReturnSuccess){
2514  av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2515  goto pe_cleanup;
2516  }
2517 
2518  time = CMTimeMake(0, avctx->time_base.den);
2519  status = VTCompressionSessionEncodeFrame(vtctx->session,
2520  pix_buf,
2521  time,
2522  kCMTimeInvalid,
2523  NULL,
2524  NULL,
2525  NULL);
2526 
2527  if (status) {
2528  av_log(avctx,
2529  AV_LOG_ERROR,
2530  "Error sending frame for extradata: %d\n",
2531  status);
2532 
2533  goto pe_cleanup;
2534  }
2535 
2536  //Populates extradata - output frames are flushed and param sets are available.
2537  status = VTCompressionSessionCompleteFrames(vtctx->session,
2538  kCMTimeIndefinite);
2539 
2540  if (status)
2541  goto pe_cleanup;
2542 
2543  status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2544  if (status) {
2545  av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2546  goto pe_cleanup;
2547  }
2548 
2549  CFRelease(buf);
2550 
2551 
2552 
2553 pe_cleanup:
2554  if(vtctx->session)
2555  CFRelease(vtctx->session);
2556 
2557  vtctx->session = NULL;
2558  vtctx->frame_ct_out = 0;
2559 
2560  av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2561 
2562  return status;
2563 }
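/*
 * Illustrative sketch (not part of the original file): extradata is produced
 * by encoding one pixel buffer taken from a temporary session's pool and then
 * flushing, so the output callback can store the parameter sets before the
 * session used for real frames is created.  A caller typically reaches this
 * path by requesting global headers (hypothetical helper):
 */
#if 0
static AVCodecContext *open_vt_h264_with_global_header(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("h264_videotoolbox");
    AVCodecContext *enc  = avcodec_alloc_context3(codec);
    if (enc)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; /* SPS/PPS land in extradata */
    return enc;
}
#endif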
2564 
 2565 static av_cold int vtenc_close(AVCodecContext *avctx)
 2566 {
2567  VTEncContext *vtctx = avctx->priv_data;
2568 
2569  if(!vtctx->session) {
 2570  pthread_cond_destroy(&vtctx->cv_sample_sent);
 2571  pthread_mutex_destroy(&vtctx->lock);
2572  return 0;
2573  }
2574 
2575  VTCompressionSessionCompleteFrames(vtctx->session,
2576  kCMTimeIndefinite);
2577  clear_frame_queue(vtctx);
 2578  pthread_cond_destroy(&vtctx->cv_sample_sent);
 2579  pthread_mutex_destroy(&vtctx->lock);
2580  CFRelease(vtctx->session);
2581  vtctx->session = NULL;
2582 
2583  if (vtctx->color_primaries) {
2584  CFRelease(vtctx->color_primaries);
2585  vtctx->color_primaries = NULL;
2586  }
2587 
2588  if (vtctx->transfer_function) {
2589  CFRelease(vtctx->transfer_function);
2590  vtctx->transfer_function = NULL;
2591  }
2592 
2593  if (vtctx->ycbcr_matrix) {
2594  CFRelease(vtctx->ycbcr_matrix);
2595  vtctx->ycbcr_matrix = NULL;
2596  }
2597 
2598  return 0;
2599 }
2600 
 2601 static const enum AVPixelFormat avc_pix_fmts[] = {
 2602  AV_PIX_FMT_VIDEOTOOLBOX,
 2603  AV_PIX_FMT_NV12,
 2604  AV_PIX_FMT_YUV420P,
 2605  AV_PIX_FMT_NONE
 2606 };
2607 
 2608 static const enum AVPixelFormat hevc_pix_fmts[] = {
 2609  AV_PIX_FMT_VIDEOTOOLBOX,
 2610  AV_PIX_FMT_NV12,
 2611  AV_PIX_FMT_YUV420P,
 2612  AV_PIX_FMT_BGRA,
 2613  AV_PIX_FMT_P010LE,
 2614  AV_PIX_FMT_NONE
 2615 };
2616 
2617 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2618 #define COMMON_OPTIONS \
2619  { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2620  { .i64 = 0 }, 0, 1, VE }, \
2621  { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2622  { .i64 = 0 }, 0, 1, VE }, \
2623  { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2624  OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2625  { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2626  OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2627  { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2628  OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2629 
2630 #define OFFSET(x) offsetof(VTEncContext, x)
2631 static const AVOption h264_options[] = {
2632  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2633  { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2634  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2635  { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
2636  { "extended", "Extend Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2637 
2638  { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2639  { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2640  { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2641  { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2642  { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2643  { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2644  { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2645  { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2646  { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2647  { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2648  { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2649 
2650  { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2651  { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2652  { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2653  { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2654  { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2655 
2656  { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2657 
 2658  COMMON_OPTIONS
 2659  { NULL },
2660 };
2661 
 2662 static const AVClass h264_videotoolbox_class = {
 2663  .class_name = "h264_videotoolbox",
2664  .item_name = av_default_item_name,
2665  .option = h264_options,
2666  .version = LIBAVUTIL_VERSION_INT,
2667 };
2668 
 2669 const AVCodec ff_h264_videotoolbox_encoder = {
 2670  .name = "h264_videotoolbox",
2671  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2672  .type = AVMEDIA_TYPE_VIDEO,
2673  .id = AV_CODEC_ID_H264,
2674  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
2675  .priv_data_size = sizeof(VTEncContext),
 2676  .pix_fmts = avc_pix_fmts,
 2677  .init = vtenc_init,
2678  .encode2 = vtenc_frame,
2679  .close = vtenc_close,
2680  .priv_class = &h264_videotoolbox_class,
 2681  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
 2682  FF_CODEC_CAP_INIT_CLEANUP,
 2683 };
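/*
 * Illustrative sketch (not part of the original file): the AVOption tables
 * above are what command-line options such as -profile:v, -level, -coder and
 * -allow_sw resolve to.  Programmatically they can be set on the encoder's
 * private context with the libavutil option API (hypothetical helper):
 */
#if 0
static int set_vt_h264_options(AVCodecContext *enc)
{
    int ret;
    if ((ret = av_opt_set(enc->priv_data, "profile", "high", 0)) < 0)  /* H264_PROF_HIGH */
        return ret;
    if ((ret = av_opt_set(enc->priv_data, "coder", "cabac", 0)) < 0)   /* VT_CABAC */
        return ret;
    return av_opt_set_int(enc->priv_data, "allow_sw", 1, 0);           /* permit SW fallback */
}
#endif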
2684 
2685 static const AVOption hevc_options[] = {
2686  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2687  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2688  { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2689 
2690  { "alpha_quality", "Compression quality for the alpha channel", OFFSET(alpha_quality), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0.0, 1.0, VE },
2691 
 2692  COMMON_OPTIONS
 2693  { NULL },
2694 };
2695 
 2696 static const AVClass hevc_videotoolbox_class = {
 2697  .class_name = "hevc_videotoolbox",
2698  .item_name = av_default_item_name,
2699  .option = hevc_options,
2700  .version = LIBAVUTIL_VERSION_INT,
2701 };
2702 
 2703 const AVCodec ff_hevc_videotoolbox_encoder = {
 2704  .name = "hevc_videotoolbox",
2705  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2706  .type = AVMEDIA_TYPE_VIDEO,
2707  .id = AV_CODEC_ID_HEVC,
 2708  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
 2709  AV_CODEC_CAP_HARDWARE,
2710  .priv_data_size = sizeof(VTEncContext),
 2711  .pix_fmts = hevc_pix_fmts,
 2712  .init = vtenc_init,
2713  .encode2 = vtenc_frame,
2714  .close = vtenc_close,
2715  .priv_class = &hevc_videotoolbox_class,
2716  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
 2717  FF_CODEC_CAP_INIT_CLEANUP,
 2718  .wrapper_name = "videotoolbox",
2719 };
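/*
 * Illustrative note (not part of the original file): for hevc_videotoolbox,
 * "profile main10" pairs with the AV_PIX_FMT_P010LE input listed above, while
 * a non-zero "alpha_quality" combined with AV_PIX_FMT_BGRA input appears to be
 * what selects the HEVC-with-alpha codec type in get_cm_codec_type().  A
 * hypothetical sketch for the alpha case:
 */
#if 0
static int request_vt_hevc_alpha(AVCodecContext *enc)
{
    enc->pix_fmt = AV_PIX_FMT_BGRA;   /* alpha comes from the BGRA input */
    return av_opt_set_double(enc->priv_data, "alpha_quality", 0.75, 0);
}
#endif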