videotoolboxenc.c
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37 
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41 
#if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange  = 'xf20' };
#endif
46 
47 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
48  size_t parameterSetIndex,
49  const uint8_t **parameterSetPointerOut,
50  size_t *parameterSetSizeOut,
51  size_t *parameterSetCountOut,
52  int *NALUnitHeaderLengthOut);
53 
//These symbols may not be present
static struct{
    getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;

    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC, kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0, kVTProfileLevel_H264_Baseline_4_2,
                kVTProfileLevel_H264_Baseline_5_0, kVTProfileLevel_H264_Baseline_5_1,
                kVTProfileLevel_H264_Baseline_5_2, kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2, kVTProfileLevel_H264_Main_5_1,
                kVTProfileLevel_H264_Main_5_2, kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0, kVTProfileLevel_H264_High_3_1,
                kVTProfileLevel_H264_High_3_2, kVTProfileLevel_H264_High_4_0,
                kVTProfileLevel_H264_High_4_1, kVTProfileLevel_H264_High_4_2,
                kVTProfileLevel_H264_High_5_1, kVTProfileLevel_H264_High_5_2,
                kVTProfileLevel_H264_High_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel, kVTProfileLevel_HEVC_Main10_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
} compat_keys;
94 
95 #define GET_SYM(symbol, defaultVal) \
96 do{ \
97  CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
98  if(!handle) \
99  compat_keys.symbol = CFSTR(defaultVal); \
100  else \
101  compat_keys.symbol = *handle; \
102 }while(0)
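/*
 * GET_SYM resolves a CFStringRef constant at run time: if dlsym() finds the
 * symbol in the running process, that value is used; otherwise compat_keys
 * gets a CFSTR() literal with the same textual value, on the assumption that
 * VideoToolbox compares these keys by string value. This keeps the binary
 * loadable on OS releases where the constant is not exported.
 */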
103 
static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
106 static void loadVTEncSymbols(){
107  compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
108  (getParameterSetAtIndex)dlsym(
109  RTLD_DEFAULT,
110  "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
111  );
112 
    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

121  GET_SYM(kVTProfileLevel_H264_Baseline_4_0, "H264_Baseline_4_0");
122  GET_SYM(kVTProfileLevel_H264_Baseline_4_2, "H264_Baseline_4_2");
123  GET_SYM(kVTProfileLevel_H264_Baseline_5_0, "H264_Baseline_5_0");
124  GET_SYM(kVTProfileLevel_H264_Baseline_5_1, "H264_Baseline_5_1");
125  GET_SYM(kVTProfileLevel_H264_Baseline_5_2, "H264_Baseline_5_2");
126  GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
127  GET_SYM(kVTProfileLevel_H264_Main_4_2, "H264_Main_4_2");
128  GET_SYM(kVTProfileLevel_H264_Main_5_1, "H264_Main_5_1");
129  GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2");
130  GET_SYM(kVTProfileLevel_H264_Main_AutoLevel, "H264_Main_AutoLevel");
131  GET_SYM(kVTProfileLevel_H264_High_3_0, "H264_High_3_0");
132  GET_SYM(kVTProfileLevel_H264_High_3_1, "H264_High_3_1");
133  GET_SYM(kVTProfileLevel_H264_High_3_2, "H264_High_3_2");
134  GET_SYM(kVTProfileLevel_H264_High_4_0, "H264_High_4_0");
135  GET_SYM(kVTProfileLevel_H264_High_4_1, "H264_High_4_1");
136  GET_SYM(kVTProfileLevel_H264_High_4_2, "H264_High_4_2");
137  GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
138  GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
139  GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
140 
141  GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
142  GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
143 
    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
150 }
151 
typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT
} VT_H264Profile;

typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;

typedef enum VT_HEVCProfile {
    HEVC_PROF_AUTO,
    HEVC_PROF_MAIN,
    HEVC_PROF_MAIN10,
    HEVC_PROF_COUNT
} VT_HEVCProfile;
173 static const uint8_t start_code[] = { 0, 0, 0, 1 };
174 
175 typedef struct ExtraSEI {
176  void *data;
177  size_t size;
178 } ExtraSEI;
179 
typedef struct BufNode {
    CMSampleBufferRef cm_buffer;
    ExtraSEI *sei;
    struct BufNode* next;
    int error;
} BufNode;
186 
typedef struct VTEncContext {
    AVClass *class;
    enum AVCodecID codec_id;
    VTCompressionSessionRef session;
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;
    getParameterSetAtIndex get_param_set_func;

    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;

    BufNode *q_head;
    BufNode *q_tail;

    int64_t frame_ct_out;
    int64_t frame_ct_in;

    int64_t first_pts;
    int64_t dts_delta;

    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;
    int64_t require_sw;

    bool flushing;
    bool has_b_frames;
    bool warned_color_range;
    bool a53_cc;
} VTEncContext;
225 
226 static int vtenc_populate_extradata(AVCodecContext *avctx,
227  CMVideoCodecType codec_type,
228  CFStringRef profile_level,
229  CFNumberRef gamma_level,
230  CFDictionaryRef enc_info,
231  CFDictionaryRef pixel_buffer_info);
232 
233 /**
234  * NULL-safe release of *refPtr, and sets value to NULL.
235  */
236 static void vt_release_num(CFNumberRef* refPtr){
237  if (!*refPtr) {
238  return;
239  }
240 
241  CFRelease(*refPtr);
242  *refPtr = NULL;
243 }
244 
245 static void set_async_error(VTEncContext *vtctx, int err)
246 {
247  BufNode *info;
248 
249  pthread_mutex_lock(&vtctx->lock);
250 
251  vtctx->async_error = err;
252 
253  info = vtctx->q_head;
254  vtctx->q_head = vtctx->q_tail = NULL;
255 
256  while (info) {
257  BufNode *next = info->next;
258  CFRelease(info->cm_buffer);
259  av_free(info);
260  info = next;
261  }
262 
263  pthread_mutex_unlock(&vtctx->lock);
264 }
265 
266 static void clear_frame_queue(VTEncContext *vtctx)
267 {
268  set_async_error(vtctx, 0);
269 }
270 
271 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
272 {
273  BufNode *info;
274 
275  pthread_mutex_lock(&vtctx->lock);
276 
277  if (vtctx->async_error) {
278  pthread_mutex_unlock(&vtctx->lock);
279  return vtctx->async_error;
280  }
281 
282  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
283  *buf = NULL;
284 
285  pthread_mutex_unlock(&vtctx->lock);
286  return 0;
287  }
288 
289  while (!vtctx->q_head && !vtctx->async_error && wait) {
290  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
291  }
292 
293  if (!vtctx->q_head) {
294  pthread_mutex_unlock(&vtctx->lock);
295  *buf = NULL;
296  return 0;
297  }
298 
299  info = vtctx->q_head;
300  vtctx->q_head = vtctx->q_head->next;
301  if (!vtctx->q_head) {
302  vtctx->q_tail = NULL;
303  }
304 
305  pthread_mutex_unlock(&vtctx->lock);
306 
307  *buf = info->cm_buffer;
308  if (sei && *buf) {
309  *sei = info->sei;
310  } else if (info->sei) {
311  if (info->sei->data) av_free(info->sei->data);
312  av_free(info->sei);
313  }
314  av_free(info);
315 
316  vtctx->frame_ct_out++;
317 
318  return 0;
319 }
320 
321 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
322 {
323  BufNode *info = av_malloc(sizeof(BufNode));
324  if (!info) {
325  set_async_error(vtctx, AVERROR(ENOMEM));
326  return;
327  }
328 
329  CFRetain(buffer);
330  info->cm_buffer = buffer;
331  info->sei = sei;
332  info->next = NULL;
333 
    pthread_mutex_lock(&vtctx->lock);
    pthread_cond_signal(&vtctx->cv_sample_sent);

337  if (!vtctx->q_head) {
338  vtctx->q_head = info;
339  } else {
340  vtctx->q_tail->next = info;
341  }
342 
343  vtctx->q_tail = info;
344 
345  pthread_mutex_unlock(&vtctx->lock);
346 }
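/*
 * The BufNode list forms a simple FIFO between the VideoToolbox output
 * callback (producer), which appends encoded CMSampleBuffers under
 * vtctx->lock and signals cv_sample_sent, and vtenc_q_pop() (consumer),
 * which optionally blocks on the condition variable until a sample or an
 * asynchronous error becomes available.
 */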
347 
348 static int count_nalus(size_t length_code_size,
349  CMSampleBufferRef sample_buffer,
350  int *count)
351 {
352  size_t offset = 0;
353  int status;
354  int nalu_ct = 0;
355  uint8_t size_buf[4];
356  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
357  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
358 
359  if (length_code_size > 4)
360  return AVERROR_INVALIDDATA;
361 
362  while (offset < src_size) {
363  size_t curr_src_len;
364  size_t box_len = 0;
365  size_t i;
366 
367  status = CMBlockBufferCopyDataBytes(block,
368  offset,
369  length_code_size,
370  size_buf);
371 
372  for (i = 0; i < length_code_size; i++) {
373  box_len <<= 8;
374  box_len |= size_buf[i];
375  }
376 
377  curr_src_len = box_len + length_code_size;
378  offset += curr_src_len;
379 
380  nalu_ct++;
381  }
382 
383  *count = nalu_ct;
384  return 0;
385 }
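/*
 * Note: VideoToolbox emits AVCC-style samples, i.e. each NAL unit is prefixed
 * by a big-endian length field of length_code_size bytes rather than an
 * Annex B start code. With a 4-byte length code, a 9-byte NAL unit is stored
 * as 00 00 00 09 followed by the 9 payload bytes; count_nalus() simply walks
 * these length fields.
 */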
386 
387 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
388 {
389  switch (id) {
    case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
    case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
    default:               return 0;
393  }
394 }
395 
396 /**
397  * Get the parameter sets from a CMSampleBufferRef.
398  * @param dst If *dst isn't NULL, the parameters are copied into existing
399  * memory. *dst_size must be set accordingly when *dst != NULL.
400  * If *dst is NULL, it will be allocated.
401  * In all cases, *dst_size is set to the number of bytes used starting
402  * at *dst.
403  */
404 static int get_params_size(
405  AVCodecContext *avctx,
406  CMVideoFormatDescriptionRef vid_fmt,
407  size_t *size)
408 {
409  VTEncContext *vtctx = avctx->priv_data;
410  size_t total_size = 0;
411  size_t ps_count;
412  int is_count_bad = 0;
413  size_t i;
414  int status;
415  status = vtctx->get_param_set_func(vid_fmt,
416  0,
417  NULL,
418  NULL,
419  &ps_count,
420  NULL);
421  if (status) {
422  is_count_bad = 1;
423  ps_count = 0;
424  status = 0;
425  }
426 
427  for (i = 0; i < ps_count || is_count_bad; i++) {
428  const uint8_t *ps;
429  size_t ps_size;
430  status = vtctx->get_param_set_func(vid_fmt,
431  i,
432  &ps,
433  &ps_size,
434  NULL,
435  NULL);
436  if (status) {
437  /*
438  * When ps_count is invalid, status != 0 ends the loop normally
439  * unless we didn't get any parameter sets.
440  */
441  if (i > 0 && is_count_bad) status = 0;
442 
443  break;
444  }
445 
446  total_size += ps_size + sizeof(start_code);
447  }
448 
449  if (status) {
450  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
451  return AVERROR_EXTERNAL;
452  }
453 
454  *size = total_size;
455  return 0;
456 }
457 
458 static int copy_param_sets(
459  AVCodecContext *avctx,
460  CMVideoFormatDescriptionRef vid_fmt,
461  uint8_t *dst,
462  size_t dst_size)
463 {
464  VTEncContext *vtctx = avctx->priv_data;
465  size_t ps_count;
466  int is_count_bad = 0;
467  int status;
468  size_t offset = 0;
469  size_t i;
470 
471  status = vtctx->get_param_set_func(vid_fmt,
472  0,
473  NULL,
474  NULL,
475  &ps_count,
476  NULL);
477  if (status) {
478  is_count_bad = 1;
479  ps_count = 0;
480  status = 0;
481  }
482 
483 
484  for (i = 0; i < ps_count || is_count_bad; i++) {
485  const uint8_t *ps;
486  size_t ps_size;
487  size_t next_offset;
488 
489  status = vtctx->get_param_set_func(vid_fmt,
490  i,
491  &ps,
492  &ps_size,
493  NULL,
494  NULL);
495  if (status) {
496  if (i > 0 && is_count_bad) status = 0;
497 
498  break;
499  }
500 
501  next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }
506 
507  memcpy(dst + offset, start_code, sizeof(start_code));
508  offset += sizeof(start_code);
509 
510  memcpy(dst + offset, ps, ps_size);
511  offset = next_offset;
512  }
513 
514  if (status) {
515  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
516  return AVERROR_EXTERNAL;
517  }
518 
519  return 0;
520 }
521 
522 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
523 {
524  CMVideoFormatDescriptionRef vid_fmt;
525  size_t total_size;
526  int status;
527 
528  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
529  if (!vid_fmt) {
530  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
531  return AVERROR_EXTERNAL;
532  }
533 
534  status = get_params_size(avctx, vid_fmt, &total_size);
535  if (status) {
536  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
537  return status;
538  }
539 
540  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
541  if (!avctx->extradata) {
542  return AVERROR(ENOMEM);
543  }
544  avctx->extradata_size = total_size;
545 
546  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
547 
548  if (status) {
549  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
550  return status;
551  }
552 
553  return 0;
554 }
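/*
 * When AV_CODEC_FLAG_GLOBAL_HEADER is set, the parameter sets (SPS/PPS, plus
 * VPS for HEVC) are exported once through avctx->extradata as Annex B NAL
 * units instead of being repeated in front of every keyframe packet.
 */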
555 
static void vtenc_output_callback(
        void *ctx,
558  void *sourceFrameCtx,
559  OSStatus status,
560  VTEncodeInfoFlags flags,
561  CMSampleBufferRef sample_buffer)
562 {
563  AVCodecContext *avctx = ctx;
564  VTEncContext *vtctx = avctx->priv_data;
565  ExtraSEI *sei = sourceFrameCtx;
566 
567  if (vtctx->async_error) {
568  if(sample_buffer) CFRelease(sample_buffer);
569  return;
570  }
571 
    if (status || !sample_buffer) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }
577 
578  if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
579  int set_status = set_extradata(avctx, sample_buffer);
580  if (set_status) {
581  set_async_error(vtctx, set_status);
582  return;
583  }
584  }
585 
586  vtenc_q_push(vtctx, sample_buffer, sei);
587 }
588 
static int get_length_code_size(
    AVCodecContext *avctx,
591  CMSampleBufferRef sample_buffer,
592  size_t *size)
593 {
594  VTEncContext *vtctx = avctx->priv_data;
595  CMVideoFormatDescriptionRef vid_fmt;
596  int isize;
597  int status;
598 
599  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
600  if (!vid_fmt) {
601  av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
602  return AVERROR_EXTERNAL;
603  }
604 
605  status = vtctx->get_param_set_func(vid_fmt,
606  0,
607  NULL,
608  NULL,
609  NULL,
610  &isize);
611  if (status) {
612  av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
613  return AVERROR_EXTERNAL;
614  }
615 
616  *size = isize;
617  return 0;
618 }
619 
620 /*
621  * Returns true on success.
622  *
623  * If profile_level_val is NULL and this method returns true, don't specify the
624  * profile/level to the encoder.
625  */
static bool get_vt_h264_profile_level(AVCodecContext *avctx,
                                      CFStringRef *profile_level_val)
628 {
629  VTEncContext *vtctx = avctx->priv_data;
630  int64_t profile = vtctx->profile;
631 
    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = H264_PROF_MAIN;
    }
636 
637  *profile_level_val = NULL;
638 
639  switch (profile) {
640  case H264_PROF_AUTO:
641  return true;
642 
643  case H264_PROF_BASELINE:
644  switch (vtctx->level) {
645  case 0: *profile_level_val =
646  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
647  case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
648  case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
649  case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
650  case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
651  case 40: *profile_level_val =
652  compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
653  case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
654  case 42: *profile_level_val =
655  compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
656  case 50: *profile_level_val =
657  compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
658  case 51: *profile_level_val =
659  compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
660  case 52: *profile_level_val =
661  compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
662  }
663  break;
664 
665  case H264_PROF_MAIN:
666  switch (vtctx->level) {
667  case 0: *profile_level_val =
668  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
669  case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
670  case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
671  case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
672  case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
673  case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
674  case 42: *profile_level_val =
675  compat_keys.kVTProfileLevel_H264_Main_4_2; break;
676  case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
677  case 51: *profile_level_val =
678  compat_keys.kVTProfileLevel_H264_Main_5_1; break;
679  case 52: *profile_level_val =
680  compat_keys.kVTProfileLevel_H264_Main_5_2; break;
681  }
682  break;
683 
684  case H264_PROF_HIGH:
685  switch (vtctx->level) {
686  case 0: *profile_level_val =
687  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
688  case 30: *profile_level_val =
689  compat_keys.kVTProfileLevel_H264_High_3_0; break;
690  case 31: *profile_level_val =
691  compat_keys.kVTProfileLevel_H264_High_3_1; break;
692  case 32: *profile_level_val =
693  compat_keys.kVTProfileLevel_H264_High_3_2; break;
694  case 40: *profile_level_val =
695  compat_keys.kVTProfileLevel_H264_High_4_0; break;
696  case 41: *profile_level_val =
697  compat_keys.kVTProfileLevel_H264_High_4_1; break;
698  case 42: *profile_level_val =
699  compat_keys.kVTProfileLevel_H264_High_4_2; break;
700  case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
701  case 51: *profile_level_val =
702  compat_keys.kVTProfileLevel_H264_High_5_1; break;
703  case 52: *profile_level_val =
704  compat_keys.kVTProfileLevel_H264_High_5_2; break;
705  }
706  break;
707  }
708 
709  if (!*profile_level_val) {
710  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
711  return false;
712  }
713 
714  return true;
715 }
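/*
 * vtctx->level stores the H.264 level scaled by ten, so 31 means level 3.1
 * and 0 selects the AutoLevel variant of the chosen profile.
 */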
716 
717 /*
718  * Returns true on success.
719  *
720  * If profile_level_val is NULL and this method returns true, don't specify the
721  * profile/level to the encoder.
722  */
static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
                                      CFStringRef *profile_level_val)
725 {
726  VTEncContext *vtctx = avctx->priv_data;
727  int64_t profile = vtctx->profile;
728 
729  *profile_level_val = NULL;
730 
731  switch (profile) {
732  case HEVC_PROF_AUTO:
733  return true;
734  case HEVC_PROF_MAIN:
735  *profile_level_val =
736  compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
737  break;
738  case HEVC_PROF_MAIN10:
739  *profile_level_val =
740  compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
741  break;
742  }
743 
744  if (!*profile_level_val) {
745  av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
746  return false;
747  }
748 
749  return true;
750 }
751 
static int get_cv_pixel_format(AVCodecContext* avctx,
                               enum AVPixelFormat fmt,
754  enum AVColorRange range,
755  int* av_pixel_format,
756  int* range_guessed)
757 {
758  if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
759  range != AVCOL_RANGE_JPEG;
760 
761  //MPEG range is used when no range is set
762  if (fmt == AV_PIX_FMT_NV12) {
763  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
764  kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
765  kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
766  } else if (fmt == AV_PIX_FMT_YUV420P) {
767  *av_pixel_format = range == AVCOL_RANGE_JPEG ?
768  kCVPixelFormatType_420YpCbCr8PlanarFullRange :
769  kCVPixelFormatType_420YpCbCr8Planar;
770  } else if (fmt == AV_PIX_FMT_P010LE) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                           kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
                           kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
    } else {
776  return AVERROR(EINVAL);
777  }
778 
779  return 0;
780 }
781 
782 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
783  VTEncContext *vtctx = avctx->priv_data;
784 
785  if (vtctx->color_primaries) {
786  CFDictionarySetValue(dict,
787  kCVImageBufferColorPrimariesKey,
788  vtctx->color_primaries);
789  }
790 
791  if (vtctx->transfer_function) {
792  CFDictionarySetValue(dict,
793  kCVImageBufferTransferFunctionKey,
794  vtctx->transfer_function);
795  }
796 
797  if (vtctx->ycbcr_matrix) {
798  CFDictionarySetValue(dict,
799  kCVImageBufferYCbCrMatrixKey,
800  vtctx->ycbcr_matrix);
801  }
802 }
803 
static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
                                       CFMutableDictionaryRef* dict)
806 {
807  CFNumberRef cv_color_format_num = NULL;
808  CFNumberRef width_num = NULL;
809  CFNumberRef height_num = NULL;
810  CFMutableDictionaryRef pixel_buffer_info = NULL;
811  int cv_color_format;
812  int status = get_cv_pixel_format(avctx,
813  avctx->pix_fmt,
814  avctx->color_range,
815  &cv_color_format,
816  NULL);
817  if (status) return status;
818 
819  pixel_buffer_info = CFDictionaryCreateMutable(
820  kCFAllocatorDefault,
821  20,
822  &kCFCopyStringDictionaryKeyCallBacks,
823  &kCFTypeDictionaryValueCallBacks);
824 
825  if (!pixel_buffer_info) goto pbinfo_nomem;
826 
827  cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
828  kCFNumberSInt32Type,
829  &cv_color_format);
830  if (!cv_color_format_num) goto pbinfo_nomem;
831 
832  CFDictionarySetValue(pixel_buffer_info,
833  kCVPixelBufferPixelFormatTypeKey,
834  cv_color_format_num);
835  vt_release_num(&cv_color_format_num);
836 
837  width_num = CFNumberCreate(kCFAllocatorDefault,
838  kCFNumberSInt32Type,
839  &avctx->width);
    if (!width_num) goto pbinfo_nomem;
841 
842  CFDictionarySetValue(pixel_buffer_info,
843  kCVPixelBufferWidthKey,
844  width_num);
845  vt_release_num(&width_num);
846 
847  height_num = CFNumberCreate(kCFAllocatorDefault,
848  kCFNumberSInt32Type,
849  &avctx->height);
850  if (!height_num) goto pbinfo_nomem;
851 
852  CFDictionarySetValue(pixel_buffer_info,
853  kCVPixelBufferHeightKey,
854  height_num);
855  vt_release_num(&height_num);
856 
857  add_color_attr(avctx, pixel_buffer_info);
858 
859  *dict = pixel_buffer_info;
860  return 0;
861 
862 pbinfo_nomem:
863  vt_release_num(&cv_color_format_num);
864  vt_release_num(&width_num);
865  vt_release_num(&height_num);
866  if (pixel_buffer_info) CFRelease(pixel_buffer_info);
867 
868  return AVERROR(ENOMEM);
869 }
870 
static int get_cv_color_primaries(AVCodecContext *avctx,
                                  CFStringRef *primaries)
873 {
874  enum AVColorPrimaries pri = avctx->color_primaries;
    switch (pri) {
        case AVCOL_PRI_UNSPECIFIED:
            *primaries = NULL;
878  break;
879 
880  case AVCOL_PRI_BT709:
881  *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
882  break;
883 
884  case AVCOL_PRI_BT2020:
885  *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
886  break;
887 
888  default:
889  av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
890  *primaries = NULL;
891  return -1;
892  }
893 
894  return 0;
895 }
896 
static int get_cv_transfer_function(AVCodecContext *avctx,
                                    CFStringRef *transfer_fnc,
899  CFNumberRef *gamma_level)
900 {
901  enum AVColorTransferCharacteristic trc = avctx->color_trc;
902  Float32 gamma;
903  *gamma_level = NULL;
904 
    switch (trc) {
        case AVCOL_TRC_UNSPECIFIED:
            *transfer_fnc = NULL;
908  break;
909 
910  case AVCOL_TRC_BT709:
911  *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
912  break;
913 
914  case AVCOL_TRC_SMPTE240M:
915  *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
916  break;
917 
918  case AVCOL_TRC_GAMMA22:
919  gamma = 2.2;
920  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
921  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
922  break;
923 
924  case AVCOL_TRC_GAMMA28:
925  gamma = 2.8;
926  *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
927  *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
928  break;
929 
930  case AVCOL_TRC_BT2020_10:
931  case AVCOL_TRC_BT2020_12:
932  *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
933  break;
934 
935  default:
936  av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
937  return -1;
938  }
939 
940  return 0;
941 }
942 
943 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
944  switch(avctx->colorspace) {
945  case AVCOL_SPC_BT709:
946  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
947  break;
948 
        case AVCOL_SPC_UNSPECIFIED:
            *matrix = NULL;
951  break;
952 
953  case AVCOL_SPC_BT470BG:
954  case AVCOL_SPC_SMPTE170M:
955  *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
956  break;
957 
958  case AVCOL_SPC_SMPTE240M:
959  *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
960  break;
961 
        case AVCOL_SPC_BT2020_NCL:
            *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
964  break;
965 
966  default:
967  av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
968  return -1;
969  }
970 
971  return 0;
972 }
973 
static int vtenc_create_encoder(AVCodecContext *avctx,
                                CMVideoCodecType codec_type,
976  CFStringRef profile_level,
977  CFNumberRef gamma_level,
978  CFDictionaryRef enc_info,
979  CFDictionaryRef pixel_buffer_info,
980  VTCompressionSessionRef *session)
981 {
982  VTEncContext *vtctx = avctx->priv_data;
983  SInt32 bit_rate = avctx->bit_rate;
984  SInt32 max_rate = avctx->rc_max_rate;
985  CFNumberRef bit_rate_num;
986  CFNumberRef bytes_per_second;
987  CFNumberRef one_second;
988  CFArrayRef data_rate_limits;
989  int64_t bytes_per_second_value = 0;
990  int64_t one_second_value = 0;
991  void *nums[2];
992 
993  int status = VTCompressionSessionCreate(kCFAllocatorDefault,
994  avctx->width,
995  avctx->height,
996  codec_type,
997  enc_info,
998  pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
1002  session);
1003 
1004  if (status || !vtctx->session) {
1005  av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1006 
1007 #if !TARGET_OS_IPHONE
1008  if (!vtctx->allow_sw) {
1009  av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1010  }
1011 #endif
1012 
1013  return AVERROR_EXTERNAL;
1014  }
1015 
1016  bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1017  kCFNumberSInt32Type,
1018  &bit_rate);
1019  if (!bit_rate_num) return AVERROR(ENOMEM);
1020 
1021  status = VTSessionSetProperty(vtctx->session,
1022  kVTCompressionPropertyKey_AverageBitRate,
1023  bit_rate_num);
1024  CFRelease(bit_rate_num);
1025 
1026  if (status) {
1027  av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1028  return AVERROR_EXTERNAL;
1029  }
1030 
1031  if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1032  // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1033  bytes_per_second_value = max_rate >> 3;
1034  bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1035  kCFNumberSInt64Type,
1036  &bytes_per_second_value);
1037  if (!bytes_per_second) {
1038  return AVERROR(ENOMEM);
1039  }
1040  one_second_value = 1;
1041  one_second = CFNumberCreate(kCFAllocatorDefault,
1042  kCFNumberSInt64Type,
1043  &one_second_value);
1044  if (!one_second) {
1045  CFRelease(bytes_per_second);
1046  return AVERROR(ENOMEM);
1047  }
1048  nums[0] = (void *)bytes_per_second;
1049  nums[1] = (void *)one_second;
1050  data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1051  (const void **)nums,
1052  2,
1053  &kCFTypeArrayCallBacks);
1054 
1055  if (!data_rate_limits) {
1056  CFRelease(bytes_per_second);
1057  CFRelease(one_second);
1058  return AVERROR(ENOMEM);
1059  }
1060  status = VTSessionSetProperty(vtctx->session,
1061  kVTCompressionPropertyKey_DataRateLimits,
1062  data_rate_limits);
1063 
1064  CFRelease(bytes_per_second);
1065  CFRelease(one_second);
1066  CFRelease(data_rate_limits);
1067 
1068  if (status) {
1069  av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1070  return AVERROR_EXTERNAL;
1071  }
1072  }
1073 
1074  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1075  // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1076  if (profile_level) {
1077  status = VTSessionSetProperty(vtctx->session,
1078  kVTCompressionPropertyKey_ProfileLevel,
1079  profile_level);
1080  if (status) {
1081  av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
1082  }
1083  }
1084  }
1085 
1086  if (avctx->gop_size > 0) {
1087  CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1088  kCFNumberIntType,
1089  &avctx->gop_size);
1090  if (!interval) {
1091  return AVERROR(ENOMEM);
1092  }
1093 
1094  status = VTSessionSetProperty(vtctx->session,
1095  kVTCompressionPropertyKey_MaxKeyFrameInterval,
1096  interval);
1097  CFRelease(interval);
1098 
1099  if (status) {
1100  av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1101  return AVERROR_EXTERNAL;
1102  }
1103  }
1104 
1105  if (vtctx->frames_before) {
1106  status = VTSessionSetProperty(vtctx->session,
1107  kVTCompressionPropertyKey_MoreFramesBeforeStart,
1108  kCFBooleanTrue);
1109 
1110  if (status == kVTPropertyNotSupportedErr) {
1111  av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1112  } else if (status) {
1113  av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1114  }
1115  }
1116 
1117  if (vtctx->frames_after) {
1118  status = VTSessionSetProperty(vtctx->session,
1119  kVTCompressionPropertyKey_MoreFramesAfterEnd,
1120  kCFBooleanTrue);
1121 
1122  if (status == kVTPropertyNotSupportedErr) {
1123  av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1124  } else if (status) {
1125  av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1126  }
1127  }
1128 
1129  if (avctx->sample_aspect_ratio.num != 0) {
1130  CFNumberRef num;
1131  CFNumberRef den;
1132  CFMutableDictionaryRef par;
1133  AVRational *avpar = &avctx->sample_aspect_ratio;
1134 
1135  av_reduce(&avpar->num, &avpar->den,
1136  avpar->num, avpar->den,
1137  0xFFFFFFFF);
1138 
1139  num = CFNumberCreate(kCFAllocatorDefault,
1140  kCFNumberIntType,
1141  &avpar->num);
1142 
1143  den = CFNumberCreate(kCFAllocatorDefault,
1144  kCFNumberIntType,
1145  &avpar->den);
1146 
1147 
1148 
1149  par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1150  2,
1151  &kCFCopyStringDictionaryKeyCallBacks,
1152  &kCFTypeDictionaryValueCallBacks);
1153 
1154  if (!par || !num || !den) {
1155  if (par) CFRelease(par);
1156  if (num) CFRelease(num);
1157  if (den) CFRelease(den);
1158 
1159  return AVERROR(ENOMEM);
1160  }
1161 
1162  CFDictionarySetValue(
1163  par,
1164  kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1165  num);
1166 
1167  CFDictionarySetValue(
1168  par,
1169  kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1170  den);
1171 
1172  status = VTSessionSetProperty(vtctx->session,
1173  kVTCompressionPropertyKey_PixelAspectRatio,
1174  par);
1175 
1176  CFRelease(par);
1177  CFRelease(num);
1178  CFRelease(den);
1179 
1180  if (status) {
1181  av_log(avctx,
1182  AV_LOG_ERROR,
1183  "Error setting pixel aspect ratio to %d:%d: %d.\n",
1184  avctx->sample_aspect_ratio.num,
1185  avctx->sample_aspect_ratio.den,
1186  status);
1187 
1188  return AVERROR_EXTERNAL;
1189  }
1190  }
1191 
1192 
1193  if (vtctx->transfer_function) {
1194  status = VTSessionSetProperty(vtctx->session,
1195  kVTCompressionPropertyKey_TransferFunction,
1196  vtctx->transfer_function);
1197 
1198  if (status) {
1199  av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1200  }
1201  }
1202 
1203 
1204  if (vtctx->ycbcr_matrix) {
1205  status = VTSessionSetProperty(vtctx->session,
1206  kVTCompressionPropertyKey_YCbCrMatrix,
1207  vtctx->ycbcr_matrix);
1208 
1209  if (status) {
1210  av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1211  }
1212  }
1213 
1214 
1215  if (vtctx->color_primaries) {
1216  status = VTSessionSetProperty(vtctx->session,
1217  kVTCompressionPropertyKey_ColorPrimaries,
1218  vtctx->color_primaries);
1219 
1220  if (status) {
1221  av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1222  }
1223  }
1224 
1225  if (gamma_level) {
1226  status = VTSessionSetProperty(vtctx->session,
1227  kCVImageBufferGammaLevelKey,
1228  gamma_level);
1229 
1230  if (status) {
1231  av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1232  }
1233  }
1234 
1235  if (!vtctx->has_b_frames) {
1236  status = VTSessionSetProperty(vtctx->session,
1237  kVTCompressionPropertyKey_AllowFrameReordering,
1238  kCFBooleanFalse);
1239 
1240  if (status) {
1241  av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1242  return AVERROR_EXTERNAL;
1243  }
1244  }
1245 
1246  if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1247  CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1248  compat_keys.kVTH264EntropyMode_CABAC:
1249  compat_keys.kVTH264EntropyMode_CAVLC;
1250 
1251  status = VTSessionSetProperty(vtctx->session,
1252  compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1253  entropy);
1254 
1255  if (status) {
1256  av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1257  }
1258  }
1259 
1260  if (vtctx->realtime) {
1261  status = VTSessionSetProperty(vtctx->session,
1262  compat_keys.kVTCompressionPropertyKey_RealTime,
1263  kCFBooleanTrue);
1264 
1265  if (status) {
1266  av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1267  }
1268  }
1269 
1270  status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1271  if (status) {
1272  av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1273  return AVERROR_EXTERNAL;
1274  }
1275 
1276  return 0;
1277 }
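/*
 * Rate-control note: AverageBitRate is given in bits per second, while
 * DataRateLimits takes byte-count/duration pairs, hence the max_rate >> 3
 * conversion and the one-second window used above.
 */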
1278 
static int vtenc_configure_encoder(AVCodecContext *avctx)
{
1281  CFMutableDictionaryRef enc_info;
1282  CFMutableDictionaryRef pixel_buffer_info;
1283  CMVideoCodecType codec_type;
1284  VTEncContext *vtctx = avctx->priv_data;
1285  CFStringRef profile_level;
1286  CFNumberRef gamma_level = NULL;
1287  int status;
1288 
    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
1291  av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1292  return AVERROR(EINVAL);
1293  }
1294 
1295  vtctx->codec_id = avctx->codec_id;
1296 
1297  if (vtctx->codec_id == AV_CODEC_ID_H264) {
1298  vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1299 
1300  vtctx->has_b_frames = avctx->max_b_frames > 0;
1301  if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1302  av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1303  vtctx->has_b_frames = false;
1304  }
1305 
1306  if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1307  av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1308  vtctx->entropy = VT_ENTROPY_NOT_SET;
1309  }
1310 
1311  if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1312  } else {
1313  vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1314  if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1315  if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1316  }
1317 
1318  enc_info = CFDictionaryCreateMutable(
1319  kCFAllocatorDefault,
1320  20,
1321  &kCFCopyStringDictionaryKeyCallBacks,
1322  &kCFTypeDictionaryValueCallBacks
1323  );
1324 
1325  if (!enc_info) return AVERROR(ENOMEM);
1326 
1327 #if !TARGET_OS_IPHONE
1328  if(vtctx->require_sw) {
1329  CFDictionarySetValue(enc_info,
1330  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1331  kCFBooleanFalse);
1332  } else if (!vtctx->allow_sw) {
1333  CFDictionarySetValue(enc_info,
1334  compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1335  kCFBooleanTrue);
1336  } else {
1337  CFDictionarySetValue(enc_info,
1338  compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1339  kCFBooleanTrue);
1340  }
1341 #endif
1342 
1343  if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1344  status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1345  if (status)
1346  goto init_cleanup;
1347  } else {
1348  pixel_buffer_info = NULL;
1349  }
1350 
1351  vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1352 
1353  get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1354  get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1355  get_cv_color_primaries(avctx, &vtctx->color_primaries);
1356 
1357 
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
1361  profile_level,
1362  gamma_level,
1363  enc_info,
1364  pixel_buffer_info);
1365  if (status)
1366  goto init_cleanup;
1367  }
1368 
1369  status = vtenc_create_encoder(avctx,
1370  codec_type,
1371  profile_level,
1372  gamma_level,
1373  enc_info,
1374  pixel_buffer_info,
1375  &vtctx->session);
1376 
1377 init_cleanup:
1378  if (gamma_level)
1379  CFRelease(gamma_level);
1380 
1381  if (pixel_buffer_info)
1382  CFRelease(pixel_buffer_info);
1383 
1384  CFRelease(enc_info);
1385 
1386  return status;
1387 }
1388 
static av_cold int vtenc_init(AVCodecContext *avctx)
{
1391  VTEncContext *vtctx = avctx->priv_data;
1392  CFBooleanRef has_b_frames_cfbool;
1393  int status;

    pthread_once(&once_ctrl, loadVTEncSymbols);

    pthread_mutex_init(&vtctx->lock, NULL);
    pthread_cond_init(&vtctx->cv_sample_sent, NULL);

    vtctx->session = NULL;
    status = vtenc_configure_encoder(avctx);
    if (status) return status;
1403 
1404  status = VTSessionCopyProperty(vtctx->session,
1405  kVTCompressionPropertyKey_AllowFrameReordering,
1406  kCFAllocatorDefault,
1407  &has_b_frames_cfbool);
1408 
1409  if (!status && has_b_frames_cfbool) {
1410  //Some devices don't output B-frames for main profile, even if requested.
1411  vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1412  CFRelease(has_b_frames_cfbool);
1413  }
1414  avctx->has_b_frames = vtctx->has_b_frames;
1415 
1416  return 0;
1417 }
1418 
1419 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1420 {
1421  CFArrayRef attachments;
1422  CFDictionaryRef attachment;
1423  CFBooleanRef not_sync;
1424  CFIndex len;
1425 
1426  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1427  len = !attachments ? 0 : CFArrayGetCount(attachments);
1428 
1429  if (!len) {
1430  *is_key_frame = true;
1431  return;
1432  }
1433 
1434  attachment = CFArrayGetValueAtIndex(attachments, 0);
1435 
1436  if (CFDictionaryGetValueIfPresent(attachment,
1437  kCMSampleAttachmentKey_NotSync,
1438  (const void **)&not_sync))
1439  {
1440  *is_key_frame = !CFBooleanGetValue(not_sync);
1441  } else {
1442  *is_key_frame = true;
1443  }
1444 }
1445 
1446 static int is_post_sei_nal_type(int nal_type){
1447  return nal_type != H264_NAL_SEI &&
1448  nal_type != H264_NAL_SPS &&
1449  nal_type != H264_NAL_PPS &&
1450  nal_type != H264_NAL_AUD;
1451 }
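/*
 * is_post_sei_nal_type() identifies NAL unit types that must follow any SEI
 * in an access unit; the A/53 caption SEI is inserted just before the first
 * such NAL unit when the encoder did not emit an SEI of its own.
 */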
1452 
/*
 * Walks the SEI messages of an SEI NAL unit and stores in *sei_end the
 * position right after the last message, where additional messages can be
 * appended. Returns the number of bytes spanned, 0 if nal_data is not an SEI
 * NAL unit, or a negative error code on malformed input.
 */
1457 static int find_sei_end(AVCodecContext *avctx,
1458  uint8_t *nal_data,
1459  size_t nal_size,
1460  uint8_t **sei_end)
1461 {
1462  int nal_type;
1463  size_t sei_payload_size = 0;
1464  int sei_payload_type = 0;
1465  *sei_end = NULL;
1466  uint8_t *nal_start = nal_data;
1467 
1468  if (!nal_size)
1469  return 0;
1470 
1471  nal_type = *nal_data & 0x1F;
1472  if (nal_type != H264_NAL_SEI)
1473  return 0;
1474 
1475  nal_data++;
1476  nal_size--;
1477 
1478  if (nal_data[nal_size - 1] == 0x80)
1479  nal_size--;
1480 
1481  while (nal_size > 0 && *nal_data > 0) {
1482  do{
1483  sei_payload_type += *nal_data;
1484  nal_data++;
1485  nal_size--;
1486  } while (nal_size > 0 && *nal_data == 0xFF);
1487 
1488  if (!nal_size) {
1489  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1490  return AVERROR_INVALIDDATA;
1491  }
1492 
1493  do{
1494  sei_payload_size += *nal_data;
1495  nal_data++;
1496  nal_size--;
1497  } while (nal_size > 0 && *nal_data == 0xFF);
1498 
1499  if (nal_size < sei_payload_size) {
1500  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1501  return AVERROR_INVALIDDATA;
1502  }
1503 
1504  nal_data += sei_payload_size;
1505  nal_size -= sei_payload_size;
1506  }
1507 
1508  *sei_end = nal_data;
1509 
1510  return nal_data - nal_start + 1;
1511 }
1512 
1513 /**
1514  * Copies the data inserting emulation prevention bytes as needed.
1515  * Existing data in the destination can be taken into account by providing
1516  * dst with a dst_offset > 0.
1517  *
1518  * @return The number of bytes copied on success. On failure, the negative of
1519  * the number of bytes needed to copy src is returned.
1520  */
1521 static int copy_emulation_prev(const uint8_t *src,
1522  size_t src_size,
1523  uint8_t *dst,
1524  ssize_t dst_offset,
1525  size_t dst_size)
1526 {
1527  int zeros = 0;
1528  int wrote_bytes;
1529  uint8_t* dst_start;
1530  uint8_t* dst_end = dst + dst_size;
1531  const uint8_t* src_end = src + src_size;
1532  int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1533  int i;
1534  for (i = start_at; i < dst_offset && i < dst_size; i++) {
1535  if (!dst[i])
1536  zeros++;
1537  else
1538  zeros = 0;
1539  }
1540 
1541  dst += dst_offset;
1542  dst_start = dst;
1543  for (; src < src_end; src++, dst++) {
1544  if (zeros == 2) {
1545  int insert_ep3_byte = *src <= 3;
1546  if (insert_ep3_byte) {
1547  if (dst < dst_end)
1548  *dst = 3;
1549  dst++;
1550  }
1551 
1552  zeros = 0;
1553  }
1554 
1555  if (dst < dst_end)
1556  *dst = *src;
1557 
1558  if (!*src)
1559  zeros++;
1560  else
1561  zeros = 0;
1562  }
1563 
1564  wrote_bytes = dst - dst_start;
1565 
1566  if (dst > dst_end)
1567  return -wrote_bytes;
1568 
1569  return wrote_bytes;
1570 }
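/*
 * Example: the payload bytes 00 00 01 are written as 00 00 03 01, and
 * 00 00 00 as 00 00 03 00, so the resulting byte stream never contains a
 * sequence that a decoder could mistake for a start code.
 */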
1571 
1572 static int write_sei(const ExtraSEI *sei,
1573  int sei_type,
1574  uint8_t *dst,
1575  size_t dst_size)
1576 {
1577  uint8_t *sei_start = dst;
1578  size_t remaining_sei_size = sei->size;
1579  size_t remaining_dst_size = dst_size;
1580  int header_bytes;
1581  int bytes_written;
1582  ssize_t offset;
1583 
1584  if (!remaining_dst_size)
1585  return AVERROR_BUFFER_TOO_SMALL;
1586 
1587  while (sei_type && remaining_dst_size != 0) {
1588  int sei_byte = sei_type > 255 ? 255 : sei_type;
1589  *dst = sei_byte;
1590 
1591  sei_type -= sei_byte;
1592  dst++;
1593  remaining_dst_size--;
1594  }
1595 
    if (!remaining_dst_size)
1597  return AVERROR_BUFFER_TOO_SMALL;
1598 
1599  while (remaining_sei_size && remaining_dst_size != 0) {
1600  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1601  *dst = size_byte;
1602 
1603  remaining_sei_size -= size_byte;
1604  dst++;
1605  remaining_dst_size--;
1606  }
1607 
1608  if (remaining_dst_size < sei->size)
1609  return AVERROR_BUFFER_TOO_SMALL;
1610 
1611  header_bytes = dst - sei_start;
1612 
1613  offset = header_bytes;
1614  bytes_written = copy_emulation_prev(sei->data,
1615  sei->size,
1616  sei_start,
1617  offset,
1618  dst_size);
1619  if (bytes_written < 0)
1620  return AVERROR_BUFFER_TOO_SMALL;
1621 
1622  bytes_written += header_bytes;
1623  return bytes_written;
1624 }
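/*
 * SEI payload type and payload size use the same variable-length coding: a
 * run of 0xFF bytes each contributing 255, terminated by a byte holding the
 * remainder. A payload size of 300 is therefore written as FF 2D (255 + 45).
 * write_sei() emits that header followed by the emulation-protected payload.
 */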
1625 
1626 /**
1627  * Copies NAL units and replaces length codes with
1628  * H.264 Annex B start codes. On failure, the contents of
1629  * dst_data may have been modified.
1630  *
1631  * @param length_code_size Byte length of each length code
1632  * @param sample_buffer NAL units prefixed with length codes.
1633  * @param sei Optional A53 closed captions SEI data.
1634  * @param dst_data Must be zeroed before calling this function.
1635  * Contains the copied NAL units prefixed with
1636  * start codes when the function returns
1637  * successfully.
1638  * @param dst_size Length of dst_data
1639  * @return 0 on success
1640  * AVERROR_INVALIDDATA if length_code_size is invalid
1641  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1642  * or if a length_code in src_data specifies data beyond
1643  * the end of its buffer.
1644  */
static int copy_replace_length_codes(
    AVCodecContext *avctx,
1647  size_t length_code_size,
1648  CMSampleBufferRef sample_buffer,
1649  ExtraSEI *sei,
1650  uint8_t *dst_data,
1651  size_t dst_size)
1652 {
1653  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1654  size_t remaining_src_size = src_size;
1655  size_t remaining_dst_size = dst_size;
1656  size_t src_offset = 0;
1657  int wrote_sei = 0;
1658  int status;
1659  uint8_t size_buf[4];
1660  uint8_t nal_type;
1661  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1662 
1663  if (length_code_size > 4) {
1664  return AVERROR_INVALIDDATA;
1665  }
1666 
1667  while (remaining_src_size > 0) {
1668  size_t curr_src_len;
1669  size_t curr_dst_len;
1670  size_t box_len = 0;
1671  size_t i;
1672 
1673  uint8_t *dst_box;
1674 
1675  status = CMBlockBufferCopyDataBytes(block,
1676  src_offset,
1677  length_code_size,
1678  size_buf);
1679  if (status) {
1680  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1681  return AVERROR_EXTERNAL;
1682  }
1683 
1684  status = CMBlockBufferCopyDataBytes(block,
1685  src_offset + length_code_size,
1686  1,
1687  &nal_type);
1688 
1689  if (status) {
1690  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1691  return AVERROR_EXTERNAL;
1692  }
1693 
1694  nal_type &= 0x1F;
1695 
1696  for (i = 0; i < length_code_size; i++) {
1697  box_len <<= 8;
1698  box_len |= size_buf[i];
1699  }
1700 
1701  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1702  //No SEI NAL unit - insert.
1703  int wrote_bytes;
1704 
1705  memcpy(dst_data, start_code, sizeof(start_code));
1706  dst_data += sizeof(start_code);
1707  remaining_dst_size -= sizeof(start_code);
1708 
1709  *dst_data = H264_NAL_SEI;
1710  dst_data++;
1711  remaining_dst_size--;
1712 
            wrote_bytes = write_sei(sei,
                                    H264_SEI_TYPE_USER_DATA_REGISTERED,
                                    dst_data,
1716  remaining_dst_size);
1717 
1718  if (wrote_bytes < 0)
1719  return wrote_bytes;
1720 
1721  remaining_dst_size -= wrote_bytes;
1722  dst_data += wrote_bytes;
1723 
1724  if (remaining_dst_size <= 0)
1725  return AVERROR_BUFFER_TOO_SMALL;
1726 
1727  *dst_data = 0x80;
1728 
1729  dst_data++;
1730  remaining_dst_size--;
1731 
1732  wrote_sei = 1;
1733  }
1734 
1735  curr_src_len = box_len + length_code_size;
1736  curr_dst_len = box_len + sizeof(start_code);
1737 
1738  if (remaining_src_size < curr_src_len) {
1739  return AVERROR_BUFFER_TOO_SMALL;
1740  }
1741 
1742  if (remaining_dst_size < curr_dst_len) {
1743  return AVERROR_BUFFER_TOO_SMALL;
1744  }
1745 
1746  dst_box = dst_data + sizeof(start_code);
1747 
1748  memcpy(dst_data, start_code, sizeof(start_code));
1749  status = CMBlockBufferCopyDataBytes(block,
1750  src_offset + length_code_size,
1751  box_len,
1752  dst_box);
1753 
1754  if (status) {
1755  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1756  return AVERROR_EXTERNAL;
1757  }
1758 
1759  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1760  //Found SEI NAL unit - append.
1761  int wrote_bytes;
1762  int old_sei_length;
1763  int extra_bytes;
1764  uint8_t *new_sei;
1765  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1766  if (old_sei_length < 0)
                return old_sei_length;
1768 
            wrote_bytes = write_sei(sei,
                                    H264_SEI_TYPE_USER_DATA_REGISTERED,
                                    new_sei,
1772  remaining_dst_size - old_sei_length);
1773  if (wrote_bytes < 0)
1774  return wrote_bytes;
1775 
1776  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1777  return AVERROR_BUFFER_TOO_SMALL;
1778 
1779  new_sei[wrote_bytes++] = 0x80;
1780  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1781 
1782  dst_data += extra_bytes;
1783  remaining_dst_size -= extra_bytes;
1784 
1785  wrote_sei = 1;
1786  }
1787 
1788  src_offset += curr_src_len;
1789  dst_data += curr_dst_len;
1790 
1791  remaining_src_size -= curr_src_len;
1792  remaining_dst_size -= curr_dst_len;
1793  }
1794 
1795  return 0;
1796 }
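/*
 * Example of the rewrite above, with 4-byte length codes:
 *   input : 00 00 00 05 <NAL 1, 5 bytes> 00 00 00 02 <NAL 2, 2 bytes>
 *   output: 00 00 00 01 <NAL 1, 5 bytes> 00 00 00 01 <NAL 2, 2 bytes>
 * plus, when closed-caption side data is present, an SEI NAL unit that is
 * either inserted before the first slice or appended to an existing SEI.
 */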
1797 
1798 /**
1799  * Returns a sufficient number of bytes to contain the sei data.
1800  * It may be greater than the minimum required.
1801  */
1802 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1803  int copied_size;
1804  if (sei->size == 0)
1805  return 0;
1806 
1807  copied_size = -copy_emulation_prev(sei->data,
1808  sei->size,
1809  NULL,
1810  0,
1811  0);
1812 
1813  if ((sei->size % 255) == 0) //may result in an extra byte
1814  copied_size++;
1815 
1816  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1817 }
1818 
static int vtenc_cb_outputs_to_packet(
    AVCodecContext *avctx,
1821  CMSampleBufferRef sample_buffer,
1822  AVPacket *pkt,
1823  ExtraSEI *sei)
1824 {
1825  VTEncContext *vtctx = avctx->priv_data;
1826 
1827  int status;
1828  bool is_key_frame;
1829  bool add_header;
1830  size_t length_code_size;
1831  size_t header_size = 0;
1832  size_t in_buf_size;
1833  size_t out_buf_size;
1834  size_t sei_nalu_size = 0;
1835  int64_t dts_delta;
1836  int64_t time_base_num;
1837  int nalu_count;
1838  CMTime pts;
1839  CMTime dts;
1840  CMVideoFormatDescriptionRef vid_fmt;
1841 
1842 
1843  vtenc_get_frame_info(sample_buffer, &is_key_frame);
1844  status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1845  if (status) return status;
1846 
1847  add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1848 
1849  if (add_header) {
1850  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1851  if (!vid_fmt) {
1852  av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1853  return AVERROR_EXTERNAL;
1854  }
1855 
1856  int status = get_params_size(avctx, vid_fmt, &header_size);
1857  if (status) return status;
1858  }
1859 
1860  status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1861  if(status)
1862  return status;
1863 
1864  if (sei) {
        size_t msg_size = get_sei_msg_bytes(sei,
                                            H264_SEI_TYPE_USER_DATA_REGISTERED);

1868  sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1869  }
1870 
1871  in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1872  out_buf_size = header_size +
1873  in_buf_size +
1874  sei_nalu_size +
1875  nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1876 
1877  status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1878  if (status < 0)
1879  return status;
1880 
1881  if (add_header) {
1882  status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1883  if(status) return status;
1884  }
1885 
    status = copy_replace_length_codes(
        avctx,
1888  length_code_size,
1889  sample_buffer,
1890  sei,
1891  pkt->data + header_size,
1892  pkt->size - header_size
1893  );
1894 
1895  if (status) {
1896  av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1897  return status;
1898  }
1899 
    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }
1903 
1904  pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1905  dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
1906 
1907  if (CMTIME_IS_INVALID(dts)) {
1908  if (!vtctx->has_b_frames) {
1909  dts = pts;
1910  } else {
1911  av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1912  return AVERROR_EXTERNAL;
1913  }
1914  }
1915 
1916  dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1917  time_base_num = avctx->time_base.num;
1918  pkt->pts = pts.value / time_base_num;
1919  pkt->dts = dts.value / time_base_num - dts_delta;
1920  pkt->size = out_buf_size;
1921 
1922  return 0;
1923 }
1924 
1925 /*
1926  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1927  * containing all planes if so.
1928  */
static int get_cv_pixel_info(
    AVCodecContext *avctx,
1931  const AVFrame *frame,
1932  int *color,
1933  int *plane_count,
1934  size_t *widths,
1935  size_t *heights,
1936  size_t *strides,
1937  size_t *contiguous_buf_size)
1938 {
1939  VTEncContext *vtctx = avctx->priv_data;
1940  int av_format = frame->format;
1941  int av_color_range = frame->color_range;
1942  int i;
1943  int range_guessed;
1944  int status;
1945 
1946  status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1947  if (status) {
1948  av_log(avctx,
1949  AV_LOG_ERROR,
1950  "Could not get pixel format for color format '%s' range '%s'.\n",
1951  av_get_pix_fmt_name(av_format),
1952  av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1953  av_color_range < AVCOL_RANGE_NB ?
1954  av_color_range_name(av_color_range) :
1955  "Unknown");
1956 
1957  return AVERROR(EINVAL);
1958  }
1959 
1960  if (range_guessed) {
1961  if (!vtctx->warned_color_range) {
            vtctx->warned_color_range = true;
            av_log(avctx,
                   AV_LOG_WARNING,
                   "Color range not set for %s. Using MPEG range.\n",
1966  av_get_pix_fmt_name(av_format));
1967  }
1968  }
1969 
1970  switch (av_format) {
1971  case AV_PIX_FMT_NV12:
1972  *plane_count = 2;
1973 
1974  widths [0] = avctx->width;
1975  heights[0] = avctx->height;
1976  strides[0] = frame ? frame->linesize[0] : avctx->width;
1977 
1978  widths [1] = (avctx->width + 1) / 2;
1979  heights[1] = (avctx->height + 1) / 2;
1980  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
1981  break;
1982 
1983  case AV_PIX_FMT_YUV420P:
1984  *plane_count = 3;
1985 
1986  widths [0] = avctx->width;
1987  heights[0] = avctx->height;
1988  strides[0] = frame ? frame->linesize[0] : avctx->width;
1989 
1990  widths [1] = (avctx->width + 1) / 2;
1991  heights[1] = (avctx->height + 1) / 2;
1992  strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
1993 
1994  widths [2] = (avctx->width + 1) / 2;
1995  heights[2] = (avctx->height + 1) / 2;
1996  strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
1997  break;
1998 
1999  case AV_PIX_FMT_P010LE:
2000  *plane_count = 2;
2001  widths[0] = avctx->width;
2002  heights[0] = avctx->height;
2003  strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2004 
2005  widths[1] = (avctx->width + 1) / 2;
2006  heights[1] = (avctx->height + 1) / 2;
2007  strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2008  break;
2009 
2010  default:
2011  av_log(
2012  avctx,
2013  AV_LOG_ERROR,
2014  "Could not get frame format info for color %d range %d.\n",
2015  av_format,
2016  av_color_range);
2017 
2018  return AVERROR(EINVAL);
2019  }
2020 
2021  *contiguous_buf_size = 0;
2022  for (i = 0; i < *plane_count; i++) {
2023  if (i < *plane_count - 1 &&
2024  frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2025  *contiguous_buf_size = 0;
2026  break;
2027  }
2028 
2029  *contiguous_buf_size += strides[i] * heights[i];
2030  }
2031 
2032  return 0;
2033 }
2034 
2035 //Not used on OSX - frame is never copied.
static int copy_avframe_to_pixel_buffer(AVCodecContext* avctx,
                                        const AVFrame *frame,
2038  CVPixelBufferRef cv_img,
2039  const size_t *plane_strides,
2040  const size_t *plane_rows)
2041 {
2042  int i, j;
2043  size_t plane_count;
2044  int status;
2045  int rows;
2046  int src_stride;
2047  int dst_stride;
2048  uint8_t *src_addr;
2049  uint8_t *dst_addr;
2050  size_t copy_bytes;
2051 
2052  status = CVPixelBufferLockBaseAddress(cv_img, 0);
2053  if (status) {
2054  av_log(
2055  avctx,
2056  AV_LOG_ERROR,
2057  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2058  status
2059  );
2060  }
2061 
2062  if (CVPixelBufferIsPlanar(cv_img)) {
2063  plane_count = CVPixelBufferGetPlaneCount(cv_img);
2064  for (i = 0; frame->data[i]; i++) {
2065  if (i == plane_count) {
2066  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2067  av_log(avctx,
2068  AV_LOG_ERROR,
2069  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2070  );
2071 
2072  return AVERROR_EXTERNAL;
2073  }
2074 
2075  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2076  src_addr = (uint8_t*)frame->data[i];
2077  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2078  src_stride = plane_strides[i];
2079  rows = plane_rows[i];
2080 
2081  if (dst_stride == src_stride) {
2082  memcpy(dst_addr, src_addr, src_stride * rows);
2083  } else {
2084  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2085 
2086  for (j = 0; j < rows; j++) {
2087  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2088  }
2089  }
2090  }
2091  } else {
2092  if (frame->data[1]) {
2093  CVPixelBufferUnlockBaseAddress(cv_img, 0);
2094  av_log(avctx,
2095  AV_LOG_ERROR,
2096  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2097  );
2098 
2099  return AVERROR_EXTERNAL;
2100  }
2101 
2102  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2103  src_addr = (uint8_t*)frame->data[0];
2104  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2105  src_stride = plane_strides[0];
2106  rows = plane_rows[0];
2107 
2108  if (dst_stride == src_stride) {
2109  memcpy(dst_addr, src_addr, src_stride * rows);
2110  } else {
2111  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2112 
2113  for (j = 0; j < rows; j++) {
2114  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2115  }
2116  }
2117  }
2118 
2119  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2120  if (status) {
2121  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2122  return AVERROR_EXTERNAL;
2123  }
2124 
2125  return 0;
2126 }
2127 
static int create_cv_pixel_buffer(AVCodecContext* avctx,
                                  const AVFrame *frame,
2130  CVPixelBufferRef *cv_img)
2131 {
2132  int plane_count;
2133  int color;
2134  size_t widths [AV_NUM_DATA_POINTERS];
2135  size_t heights[AV_NUM_DATA_POINTERS];
2136  size_t strides[AV_NUM_DATA_POINTERS];
2137  int status;
2138  size_t contiguous_buf_size;
2139  CVPixelBufferPoolRef pix_buf_pool;
2140  VTEncContext* vtctx = avctx->priv_data;
2141 
2142  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2143  av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2144 
2145  *cv_img = (CVPixelBufferRef)frame->data[3];
2146  av_assert0(*cv_img);
2147 
2148  CFRetain(*cv_img);
2149  return 0;
2150  }
2151 
2152  memset(widths, 0, sizeof(widths));
2153  memset(heights, 0, sizeof(heights));
2154  memset(strides, 0, sizeof(strides));
2155 
2156  status = get_cv_pixel_info(
2157  avctx,
2158  frame,
2159  &color,
2160  &plane_count,
2161  widths,
2162  heights,
2163  strides,
2164  &contiguous_buf_size
2165  );
2166 
2167  if (status) {
2168  av_log(
2169  avctx,
2170  AV_LOG_ERROR,
2171  "Error: Cannot convert format %d color_range %d: %d\n",
2172  frame->format,
2173  frame->color_range,
2174  status
2175  );
2176 
2177  return AVERROR_EXTERNAL;
2178  }
2179 
2180  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2181  if (!pix_buf_pool) {
2182  /* On iOS, the VT session is invalidated when the APP switches from
2183  * foreground to background and vice versa. Fetch the actual error code
2184  * of the VT session to detect that case and restart the VT session
2185  * accordingly. */
2186  OSStatus vtstatus;
2187 
2188  vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2189  if (vtstatus == kVTInvalidSessionErr) {
2190  CFRelease(vtctx->session);
2191  vtctx->session = NULL;
2192  status = vtenc_configure_encoder(avctx);
2193  if (status == 0)
2194  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2195  }
2196  if (!pix_buf_pool) {
2197  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2198  return AVERROR_EXTERNAL;
2199  }
2200  else
2201  av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2202  "kVTInvalidSessionErr error.\n");
2203  }
2204 
2205  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2206  pix_buf_pool,
2207  cv_img);
2208 
2209 
2210  if (status) {
2211  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2212  return AVERROR_EXTERNAL;
2213  }
2214 
2215  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2216  if (status) {
2217  CFRelease(*cv_img);
2218  *cv_img = NULL;
2219  return status;
2220  }
2221 
2222  return 0;
2223 }
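
The function above takes a zero-copy path when the input is already an AV_PIX_FMT_VIDEOTOOLBOX hardware frame: the CVPixelBufferRef travels in frame->data[3] and is simply retained. For software pixel formats it instead draws a buffer from the compression session's pool and copies the planes. A minimal caller-side sketch of that convention (not part of videotoolboxenc.c; the helper name is made up for illustration):

#include <CoreVideo/CoreVideo.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Illustrative helper (hypothetical, not in FFmpeg): shows where the zero-copy
 * branch of create_cv_pixel_buffer() finds the hardware surface. */
static CVPixelBufferRef peek_hw_surface(const AVFrame *frame)
{
    if (frame->format == AV_PIX_FMT_VIDEOTOOLBOX)
        return (CVPixelBufferRef)frame->data[3]; /* borrowed reference, not retained here */
    return NULL; /* software frame: the encoder copies it into a pool buffer instead */
}
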
2224 
2225 static int create_encoder_dict_h264(const AVFrame *frame,
2226  CFDictionaryRef* dict_out)
2227 {
2228  CFDictionaryRef dict = NULL;
2229  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2230  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2231  const void *vals[] = { kCFBooleanTrue };
2232 
2233  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2234  if(!dict) return AVERROR(ENOMEM);
2235  }
2236 
2237  *dict_out = dict;
2238  return 0;
2239 }
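
create_encoder_dict_h264() only attaches kVTEncodeFrameOptionKey_ForceKeyFrame when the submitted frame is marked as an I picture, so a client can force a keyframe at a specific point by tagging the frame before sending it. A hedged sketch using the public libavcodec API (enc_ctx and frame are assumed to be set up elsewhere):

#include <libavcodec/avcodec.h>

/* Hypothetical helper: request a keyframe by tagging the input frame as an
 * I picture, which create_encoder_dict_h264() above turns into
 * kVTEncodeFrameOptionKey_ForceKeyFrame. */
static int send_forced_keyframe(AVCodecContext *enc_ctx, AVFrame *frame)
{
    frame->pict_type = AV_PICTURE_TYPE_I;
    return avcodec_send_frame(enc_ctx, frame);
}
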
2240 
2241 static int vtenc_send_frame(AVCodecContext *avctx,
2242  VTEncContext *vtctx,
2243  const AVFrame *frame)
2244 {
2245  CMTime time;
2246  CFDictionaryRef frame_dict;
2247  CVPixelBufferRef cv_img = NULL;
2248  AVFrameSideData *side_data = NULL;
2249  ExtraSEI *sei = NULL;
2250  int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2251 
2252  if (status) return status;
2253 
2254  status = create_encoder_dict_h264(frame, &frame_dict);
2255  if (status) {
2256  CFRelease(cv_img);
2257  return status;
2258  }
2259 
2260  side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2261  if (vtctx->a53_cc && side_data && side_data->size) {
2262  sei = av_mallocz(sizeof(*sei));
2263  if (!sei) {
2264  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2265  } else {
2266  int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2267  if (ret < 0) {
2268  av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2269  av_free(sei);
2270  sei = NULL;
2271  }
2272  }
2273  }
2274 
2275  time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2276  status = VTCompressionSessionEncodeFrame(
2277  vtctx->session,
2278  cv_img,
2279  time,
2280  kCMTimeInvalid,
2281  frame_dict,
2282  sei,
2283  NULL
2284  );
2285 
2286  if (frame_dict) CFRelease(frame_dict);
2287  CFRelease(cv_img);
2288 
2289  if (status) {
2290  av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2291  return AVERROR_EXTERNAL;
2292  }
2293 
2294  return 0;
2295 }
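
vtenc_send_frame() maps the frame's pts, expressed in avctx->time_base units, onto a CMTime rational via CMTimeMake(pts * time_base.num, time_base.den). A small worked example with assumed numbers (29.97 fps material, pts = 90):

#include <CoreMedia/CoreMedia.h>

/* Worked example of the pts -> CMTime mapping above; the values are assumptions:
 * time_base = 1001/30000 (29.97 fps), frame->pts = 90. */
static double example_pts_in_seconds(void)
{
    CMTime t = CMTimeMake(90 * 1001, 30000); /* 90090 / 30000 */
    return CMTimeGetSeconds(t);              /* ~3.003 seconds */
}
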
2296 
2297 static av_cold int vtenc_frame(
2298  AVCodecContext *avctx,
2299  AVPacket *pkt,
2300  const AVFrame *frame,
2301  int *got_packet)
2302 {
2303  VTEncContext *vtctx = avctx->priv_data;
2304  bool get_frame;
2305  int status;
2306  CMSampleBufferRef buf = NULL;
2307  ExtraSEI *sei = NULL;
2308 
2309  if (frame) {
2310  status = vtenc_send_frame(avctx, vtctx, frame);
2311 
2312  if (status) {
2313  status = AVERROR_EXTERNAL;
2314  goto end_nopkt;
2315  }
2316 
2317  if (vtctx->frame_ct_in == 0) {
2318  vtctx->first_pts = frame->pts;
2319  } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2320  vtctx->dts_delta = frame->pts - vtctx->first_pts;
2321  }
2322 
2323  vtctx->frame_ct_in++;
2324  } else if(!vtctx->flushing) {
2325  vtctx->flushing = true;
2326 
2327  status = VTCompressionSessionCompleteFrames(vtctx->session,
2328  kCMTimeIndefinite);
2329 
2330  if (status) {
2331  av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2332  status = AVERROR_EXTERNAL;
2333  goto end_nopkt;
2334  }
2335  }
2336 
2337  *got_packet = 0;
2338  get_frame = vtctx->dts_delta >= 0 || !frame;
2339  if (!get_frame) {
2340  status = 0;
2341  goto end_nopkt;
2342  }
2343 
2344  status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2345  if (status) goto end_nopkt;
2346  if (!buf) goto end_nopkt;
2347 
2348  status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2349  if (sei) {
2350  if (sei->data) av_free(sei->data);
2351  av_free(sei);
2352  }
2353  CFRelease(buf);
2354  if (status) goto end_nopkt;
2355 
2356  *got_packet = 1;
2357  return 0;
2358 
2359 end_nopkt:
2360  av_packet_unref(pkt);
2361  return status;
2362 }
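
vtenc_frame() implements the encode2 callback: sending NULL input flips vtctx->flushing and drains the session, which is why the codec advertises AV_CODEC_CAP_DELAY. A minimal sketch of how a client drives this through the public send/receive API (error handling and packet output reduced to placeholders):

#include <libavcodec/avcodec.h>

/* Minimal sketch of one encode step through the public API. Passing a NULL
 * frame triggers the flush branch of vtenc_frame() above, which the
 * AV_CODEC_CAP_DELAY capability requires at end of stream. */
static int encode_step(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc_ctx, frame); /* frame == NULL -> flush */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc_ctx, pkt)) >= 0) {
        /* consume pkt->data / pkt->size here (write, mux, ...) */
        av_packet_unref(pkt);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
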
2363 
2364 static int vtenc_populate_extradata(AVCodecContext *avctx,
2365  CMVideoCodecType codec_type,
2366  CFStringRef profile_level,
2367  CFNumberRef gamma_level,
2368  CFDictionaryRef enc_info,
2369  CFDictionaryRef pixel_buffer_info)
2370 {
2371  VTEncContext *vtctx = avctx->priv_data;
2372  int status;
2373  CVPixelBufferPoolRef pool = NULL;
2374  CVPixelBufferRef pix_buf = NULL;
2375  CMTime time;
2376  CMSampleBufferRef buf = NULL;
2377 
2378  status = vtenc_create_encoder(avctx,
2379  codec_type,
2380  profile_level,
2381  gamma_level,
2382  enc_info,
2383  pixel_buffer_info,
2384  &vtctx->session);
2385  if (status)
2386  goto pe_cleanup;
2387 
2388  pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2389  if(!pool){
2390  av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2391  goto pe_cleanup;
2392  }
2393 
2394  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2395  pool,
2396  &pix_buf);
2397 
2398  if(status != kCVReturnSuccess){
2399  av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2400  goto pe_cleanup;
2401  }
2402 
2403  time = CMTimeMake(0, avctx->time_base.den);
2404  status = VTCompressionSessionEncodeFrame(vtctx->session,
2405  pix_buf,
2406  time,
2407  kCMTimeInvalid,
2408  NULL,
2409  NULL,
2410  NULL);
2411 
2412  if (status) {
2413  av_log(avctx,
2414  AV_LOG_ERROR,
2415  "Error sending frame for extradata: %d\n",
2416  status);
2417 
2418  goto pe_cleanup;
2419  }
2420 
2421  //Populates extradata - output frames are flushed and param sets are available.
2422  status = VTCompressionSessionCompleteFrames(vtctx->session,
2423  kCMTimeIndefinite);
2424 
2425  if (status)
2426  goto pe_cleanup;
2427 
2428  status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2429  if (status) {
2430  av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2431  goto pe_cleanup;
2432  }
2433 
2434  CFRelease(buf);
2435 
2436 
2437 
2438 pe_cleanup:
2439  if(vtctx->session)
2440  CFRelease(vtctx->session);
2441 
2442  vtctx->session = NULL;
2443  vtctx->frame_ct_out = 0;
2444 
2445  av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2446 
2447  return status;
2448 }
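
vtenc_populate_extradata() encodes a throw-away frame from the session pool purely so the parameter sets end up in avctx->extradata before any real input is seen. Callers typically trigger this path by requesting global headers, e.g. when muxing into MP4; a hedged caller-side sketch (the format and encoder contexts are assumed to exist):

#include <libavformat/avformat.h>

/* Caller-side sketch: request parameter sets in extradata, which is what the
 * dummy-frame encode above ultimately serves. */
static void request_global_header(AVCodecContext *enc_ctx, const AVFormatContext *ofmt_ctx)
{
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; /* extradata is filled during avcodec_open2() */
}
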
2449 
2450 static av_cold int vtenc_close(AVCodecContext *avctx)
2451 {
2452  VTEncContext *vtctx = avctx->priv_data;
2453 
2454  pthread_cond_destroy(&vtctx->cv_sample_sent);
2455  pthread_mutex_destroy(&vtctx->lock);
2456 
2457  if(!vtctx->session) return 0;
2458 
2459  VTCompressionSessionCompleteFrames(vtctx->session,
2460  kCMTimeIndefinite);
2461  clear_frame_queue(vtctx);
2462  CFRelease(vtctx->session);
2463  vtctx->session = NULL;
2464 
2465  if (vtctx->color_primaries) {
2466  CFRelease(vtctx->color_primaries);
2467  vtctx->color_primaries = NULL;
2468  }
2469 
2470  if (vtctx->transfer_function) {
2471  CFRelease(vtctx->transfer_function);
2472  vtctx->transfer_function = NULL;
2473  }
2474 
2475  if (vtctx->ycbcr_matrix) {
2476  CFRelease(vtctx->ycbcr_matrix);
2477  vtctx->ycbcr_matrix = NULL;
2478  }
2479 
2480  return 0;
2481 }
2482 
2483 static const enum AVPixelFormat avc_pix_fmts[] = {
2484  AV_PIX_FMT_VIDEOTOOLBOX,
2485  AV_PIX_FMT_NV12,
2486  AV_PIX_FMT_YUV420P,
2487  AV_PIX_FMT_NONE
2488 };
2489 
2490 static const enum AVPixelFormat hevc_pix_fmts[] = {
2491  AV_PIX_FMT_VIDEOTOOLBOX,
2492  AV_PIX_FMT_NV12,
2493  AV_PIX_FMT_YUV420P,
2494  AV_PIX_FMT_P010LE,
2495  AV_PIX_FMT_NONE
2496 };
2497 
2498 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2499 #define COMMON_OPTIONS \
2500  { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2501  { .i64 = 0 }, 0, 1, VE }, \
2502  { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2503  { .i64 = 0 }, 0, 1, VE }, \
2504  { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2505  OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2506  { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2507  OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2508  { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2509  OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2510 
2511 #define OFFSET(x) offsetof(VTEncContext, x)
2512 static const AVOption h264_options[] = {
2513  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2514  { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2515  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2516  { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
2517 
2518  { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2519  { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2520  { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2521  { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2522  { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2523  { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2524  { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2525  { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2526  { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2527  { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2528  { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2529 
2530  { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2531  { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2532  { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2533  { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2534  { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2535 
2536  { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2537 
2538  COMMON_OPTIONS
2539  { NULL },
2540 };
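
The private options above are reachable through the usual AVOption mechanisms. A sketch of opening the encoder with a few of them set via an options dictionary (codec and context setup are assumed; option names and constant values come from h264_options):

#include <libavutil/dict.h>
#include <libavcodec/avcodec.h>

/* Sketch: set the private options declared above through an options dictionary. */
static int open_with_options(AVCodecContext *enc_ctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "profile",  "high",  0);
    av_dict_set(&opts, "level",    "4.1",   0);
    av_dict_set(&opts, "coder",    "cabac", 0);
    av_dict_set(&opts, "allow_sw", "1",     0);

    ret = avcodec_open2(enc_ctx, codec, &opts); /* consumed entries are removed from opts */
    av_dict_free(&opts);
    return ret;
}

On the ffmpeg command line the same knobs are exposed directly, roughly as -profile:v high -coder cabac -allow_sw 1.
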
2541 
2542 static const AVClass h264_videotoolbox_class = {
2543  .class_name = "h264_videotoolbox",
2544  .item_name = av_default_item_name,
2545  .option = h264_options,
2546  .version = LIBAVUTIL_VERSION_INT,
2547 };
2548 
2549 AVCodec ff_h264_videotoolbox_encoder = {
2550  .name = "h264_videotoolbox",
2551  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2552  .type = AVMEDIA_TYPE_VIDEO,
2553  .id = AV_CODEC_ID_H264,
2554  .priv_data_size = sizeof(VTEncContext),
2555  .pix_fmts = avc_pix_fmts,
2556  .init = vtenc_init,
2557  .encode2 = vtenc_frame,
2558  .close = vtenc_close,
2559  .capabilities = AV_CODEC_CAP_DELAY,
2560  .priv_class = &h264_videotoolbox_class,
2561  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2562  FF_CODEC_CAP_INIT_CLEANUP,
2563 };
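
Because the wrapper is registered under its own name rather than replacing the default H.264 encoder, clients have to select it explicitly. A short sketch (the lookup API is standard libavcodec; nothing here is specific to this file):

#include <libavcodec/avcodec.h>

/* Sketch: pick this wrapper explicitly instead of the default H.264 encoder.
 * AV_CODEC_CAP_DELAY above means a NULL frame must be sent at EOF to drain. */
static AVCodecContext *alloc_vt_h264_encoder(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("h264_videotoolbox");
    return codec ? avcodec_alloc_context3(codec) : NULL;
}
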
2564 
2565 static const AVOption hevc_options[] = {
2566  { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2567  { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
2568  { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2569 
2570  COMMON_OPTIONS
2571  { NULL },
2572 };
2573 
2574 static const AVClass hevc_videotoolbox_class = {
2575  .class_name = "hevc_videotoolbox",
2576  .item_name = av_default_item_name,
2577  .option = hevc_options,
2578  .version = LIBAVUTIL_VERSION_INT,
2579 };
2580 
2581 AVCodec ff_hevc_videotoolbox_encoder = {
2582  .name = "hevc_videotoolbox",
2583  .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2584  .type = AVMEDIA_TYPE_VIDEO,
2585  .id = AV_CODEC_ID_HEVC,
2586  .priv_data_size = sizeof(VTEncContext),
2587  .pix_fmts = hevc_pix_fmts,
2588  .init = vtenc_init,
2589  .encode2 = vtenc_frame,
2590  .close = vtenc_close,
2591  .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2592  .priv_class = &hevc_videotoolbox_class,
2593  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2594  FF_CODEC_CAP_INIT_CLEANUP,
2595  .wrapper_name = "videotoolbox",
2596 };
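
The HEVC wrapper additionally accepts AV_PIX_FMT_P010LE input and a main10 profile constant. A hedged sketch of requesting 10-bit output (assumption: the running OS and hardware actually expose a Main10 VideoToolbox encoder; otherwise avcodec_open2() will fail):

#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>

/* Sketch for 10-bit HEVC output; enc_ctx is assumed to have been allocated
 * for hevc_videotoolbox and not yet opened. */
static int configure_hevc_main10(AVCodecContext *enc_ctx)
{
    enc_ctx->pix_fmt = AV_PIX_FMT_P010LE;  /* listed in hevc_pix_fmts above */
    return av_opt_set(enc_ctx->priv_data, "profile", "main10", 0);
}
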