21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
43 size_t parameterSetIndex,
44 const uint8_t * _Nullable *parameterSetPointerOut,
45 size_t *parameterSetSizeOut,
46 size_t *parameterSetCountOut,
47 int *NALUnitHeaderLengthOut);
90 #define GET_SYM(symbol, defaultVal) \
92 CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol); \
94 compat_keys.symbol = CFSTR(defaultVal); \
96 compat_keys.symbol = *handle; \
102 compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
105 "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
142 "EnableHardwareAcceleratedVideoEncoder");
144 "RequireHardwareAcceleratedVideoEncoder");
222 CFStringRef profile_level,
223 CFNumberRef gamma_level,
224 CFDictionaryRef enc_info,
225 CFDictionaryRef pixel_buffer_info);
304 } else if (info->sei) {
343 CMSampleBufferRef sample_buffer,
350 size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
351 CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
353 if (length_code_size > 4)
356 while (offset < src_size) {
361 status = CMBlockBufferCopyDataBytes(block,
366 for (i = 0; i < length_code_size; i++) {
368 box_len |= size_buf[i];
371 curr_src_len = box_len + length_code_size;
372 offset += curr_src_len;
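count_nalus() above walks the encoder output by reading the big-endian length field that precedes every NAL unit in the CMBlockBuffer. A hedged, stand-alone sketch of the same walk, with error handling reduced to early returns (the helper name is illustrative):

#include <stdint.h>
#include <CoreMedia/CoreMedia.h>

/* Count NAL units in an AVCC/HVCC-style sample buffer, where every NAL unit
 * is preceded by a big-endian length field of length_code_size bytes. */
static int count_nal_units(CMSampleBufferRef sample_buffer,
                           size_t length_code_size, size_t *count)
{
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    size_t offset = 0;

    *count = 0;
    if (!block || length_code_size > 4)
        return -1;

    while (offset < src_size) {
        uint8_t size_buf[4];
        size_t box_len = 0, i;

        if (CMBlockBufferCopyDataBytes(block, offset, length_code_size, size_buf))
            return -1;

        for (i = 0; i < length_code_size; i++) {
            box_len <<= 8;
            box_len |= size_buf[i];
        }

        offset += box_len + length_code_size;  /* skip prefix + payload */
        (*count)++;
    }
    return 0;
}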
400 CMVideoFormatDescriptionRef vid_fmt,
404 size_t total_size = 0;
406 int is_count_bad = 0;
421 for (i = 0; i < ps_count || is_count_bad; i++) {
435 if (i > 0 && is_count_bad) status = 0;
454 CMVideoFormatDescriptionRef vid_fmt,
460 int is_count_bad = 0;
478 for (i = 0; i < ps_count || is_count_bad; i++) {
490 if (i > 0 && is_count_bad) status = 0;
495 next_offset = offset + sizeof(start_code) + ps_size;
496 if (dst_size < next_offset) {
504 memcpy(dst + offset, ps, ps_size);
505 offset = next_offset;
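The two loops above (listing lines 421 and 478) first size and then copy the codec's parameter sets, prefixing each with a start code to build Annex B extradata. A stand-alone sketch of the same two-pass scheme for H.264, calling CMVideoFormatDescriptionGetH264ParameterSetAtIndex directly instead of the get_param_set_func indirection used by the file; error codes are simplified:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <CoreMedia/CoreMedia.h>

/* Build Annex B extradata (start code + SPS, start code + PPS, ...) from a
 * CMVideoFormatDescription. Returns 0 on success, -1 on failure. */
static int copy_h264_param_sets(CMVideoFormatDescriptionRef vid_fmt,
                                uint8_t **out, size_t *out_size)
{
    static const uint8_t start_code[] = { 0, 0, 0, 1 };
    size_t total = 0, count = 0, i, offset = 0;
    uint8_t *dst;

    /* First pass: query the number of parameter sets and their sizes. */
    if (CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, 0, NULL,
                                                           NULL, &count, NULL))
        return -1;

    for (i = 0; i < count; i++) {
        size_t ps_size;
        if (CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, i, NULL,
                                                               &ps_size, NULL, NULL))
            return -1;
        total += sizeof(start_code) + ps_size;
    }

    if (!(dst = malloc(total)))
        return -1;

    /* Second pass: copy each parameter set behind a 4-byte start code. */
    for (i = 0; i < count; i++) {
        const uint8_t *ps;
        size_t ps_size;
        if (CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, i, &ps,
                                                               &ps_size, NULL, NULL)) {
            free(dst);
            return -1;
        }
        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);
        memcpy(dst + offset, ps, ps_size);
        offset += ps_size;
    }

    *out      = dst;
    *out_size = total;
    return 0;
}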
518 CMVideoFormatDescriptionRef vid_fmt;
522 vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
552 void *sourceFrameCtx,
554 VTEncodeInfoFlags flags,
555 CMSampleBufferRef sample_buffer)
562 if(sample_buffer) CFRelease(sample_buffer);
566 if (status || !sample_buffer) {
585 CMSampleBufferRef sample_buffer,
589 CMVideoFormatDescriptionRef vid_fmt;
593 vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
621 CFStringRef *profile_level_val)
631 *profile_level_val = NULL;
638 switch (vtctx->level) {
639 case 0: *profile_level_val =
640 compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
641 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
642 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
643 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
644 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
645 case 40: *profile_level_val =
646 compat_keys.kVTProfileLevel_H264_Baseline_4_0; break;
647 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
648 case 42: *profile_level_val =
649 compat_keys.kVTProfileLevel_H264_Baseline_4_2; break;
650 case 50: *profile_level_val =
651 compat_keys.kVTProfileLevel_H264_Baseline_5_0; break;
652 case 51: *profile_level_val =
653 compat_keys.kVTProfileLevel_H264_Baseline_5_1; break;
654 case 52: *profile_level_val =
655 compat_keys.kVTProfileLevel_H264_Baseline_5_2; break;
660 switch (vtctx->level) {
661 case 0: *profile_level_val =
662 compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
663 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
664 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
665 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
666 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
667 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
668 case 42: *profile_level_val =
670 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
671 case 51: *profile_level_val =
673 case 52: *profile_level_val =
679 switch (vtctx->level) {
680 case 0: *profile_level_val =
681 compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
682 case 30: *profile_level_val =
684 case 31: *profile_level_val =
686 case 32: *profile_level_val =
688 case 40: *profile_level_val =
690 case 41: *profile_level_val =
692 case 42: *profile_level_val =
694 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
695 case 51: *profile_level_val =
697 case 52: *profile_level_val =
703 if (!*profile_level_val) {
718 CFStringRef *profile_level_val)
723 *profile_level_val = NULL;
738 if (!*profile_level_val) {
749 int* av_pixel_format,
758 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
759 kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
762 kCVPixelFormatType_420YpCbCr8PlanarFullRange :
763 kCVPixelFormatType_420YpCbCr8Planar;
775 CFDictionarySetValue(dict,
776 kCVImageBufferColorPrimariesKey,
781 CFDictionarySetValue(dict,
782 kCVImageBufferTransferFunctionKey,
787 CFDictionarySetValue(dict,
788 kCVImageBufferYCbCrMatrixKey,
794 CFMutableDictionaryRef* dict)
796 CFNumberRef cv_color_format_num = NULL;
797 CFNumberRef width_num = NULL;
798 CFNumberRef height_num = NULL;
799 CFMutableDictionaryRef pixel_buffer_info = NULL;
806 if (status) return status;
808 pixel_buffer_info = CFDictionaryCreateMutable(
811 &kCFCopyStringDictionaryKeyCallBacks,
812 &kCFTypeDictionaryValueCallBacks);
814 if (!pixel_buffer_info) goto pbinfo_nomem;
816 cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
819 if (!cv_color_format_num) goto pbinfo_nomem;
821 CFDictionarySetValue(pixel_buffer_info,
822 kCVPixelBufferPixelFormatTypeKey,
823 cv_color_format_num);
826 width_num = CFNumberCreate(kCFAllocatorDefault,
829 if (!width_num) return AVERROR(ENOMEM);
831 CFDictionarySetValue(pixel_buffer_info,
832 kCVPixelBufferWidthKey,
836 height_num = CFNumberCreate(kCFAllocatorDefault,
839 if (!height_num) goto pbinfo_nomem;
841 CFDictionarySetValue(pixel_buffer_info,
842 kCVPixelBufferHeightKey,
848 *dict = pixel_buffer_info;
855 if (pixel_buffer_info) CFRelease(pixel_buffer_info);
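The pixel-buffer attribute code above (listing lines 794-855) packs the pixel format, width and height into the source attributes later handed to VTCompressionSessionCreate(). A reduced sketch of that dictionary construction; the helper name and error strategy are illustrative, and the real function also merges colour attributes and returns FFmpeg error codes:

#include <CoreVideo/CoreVideo.h>

/* Build the source pixel-buffer attributes for a compression session. */
static CFMutableDictionaryRef make_pixel_buffer_attributes(OSType pix_fmt,
                                                           int width, int height)
{
    CFMutableDictionaryRef attrs;
    CFNumberRef fmt_num, width_num, height_num;

    attrs = CFDictionaryCreateMutable(kCFAllocatorDefault, 3,
                                      &kCFCopyStringDictionaryKeyCallBacks,
                                      &kCFTypeDictionaryValueCallBacks);
    if (!attrs)
        return NULL;

    fmt_num    = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
    width_num  = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    height_num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);

    if (!fmt_num || !width_num || !height_num) {
        if (fmt_num)    CFRelease(fmt_num);
        if (width_num)  CFRelease(width_num);
        if (height_num) CFRelease(height_num);
        CFRelease(attrs);
        return NULL;
    }

    CFDictionarySetValue(attrs, kCVPixelBufferPixelFormatTypeKey, fmt_num);
    CFDictionarySetValue(attrs, kCVPixelBufferWidthKey,           width_num);
    CFDictionarySetValue(attrs, kCVPixelBufferHeightKey,          height_num);

    CFRelease(fmt_num);
    CFRelease(width_num);
    CFRelease(height_num);
    return attrs;   /* e.g. pix_fmt = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange */
}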
861 CFStringRef *primaries)
870 *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
874 *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
887 CFStringRef *transfer_fnc,
888 CFNumberRef *gamma_level)
896 *transfer_fnc = NULL;
900 *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
904 *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
909 *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
910 *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
915 *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
916 *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
921 *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
935 *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
944 *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
948 *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
952 *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
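The helpers above map FFmpeg colour metadata onto the kCVImageBufferColorPrimaries / TransferFunction / YCbCrMatrix string constants, which the file then stores in the pixel-buffer attributes (listing lines 775-788). As a hedged aside, the same constants can also be attached to an individual CVPixelBuffer; a minimal BT.709 example using CoreVideo's CVBufferSetAttachment (not the mechanism this file uses):

#include <CoreVideo/CoreVideo.h>

/* Tag a pixel buffer as BT.709 so downstream consumers see the intended
 * colorimetry. */
static void tag_bt709(CVPixelBufferRef pix_buf)
{
    CVBufferSetAttachment(pix_buf, kCVImageBufferColorPrimariesKey,
                          kCVImageBufferColorPrimaries_ITU_R_709_2,
                          kCVAttachmentMode_ShouldPropagate);
    CVBufferSetAttachment(pix_buf, kCVImageBufferTransferFunctionKey,
                          kCVImageBufferTransferFunction_ITU_R_709_2,
                          kCVAttachmentMode_ShouldPropagate);
    CVBufferSetAttachment(pix_buf, kCVImageBufferYCbCrMatrixKey,
                          kCVImageBufferYCbCrMatrix_ITU_R_709_2,
                          kCVAttachmentMode_ShouldPropagate);
}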
965 CFStringRef profile_level,
966 CFNumberRef gamma_level,
967 CFDictionaryRef enc_info,
968 CFDictionaryRef pixel_buffer_info,
969 VTCompressionSessionRef *session)
974 CFNumberRef bit_rate_num;
975 CFNumberRef bytes_per_second;
976 CFNumberRef one_second;
977 CFArrayRef data_rate_limits;
978 int64_t bytes_per_second_value = 0;
979 int64_t one_second_value = 0;
982 int status = VTCompressionSessionCreate(kCFAllocatorDefault,
993 if (status || !vtctx->session) {
994 av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
996 #if !TARGET_OS_IPHONE
998 av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1005 bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1006 kCFNumberSInt32Type,
1008 if (!bit_rate_num) return AVERROR(ENOMEM);
1010 status = VTSessionSetProperty(vtctx->session,
1011 kVTCompressionPropertyKey_AverageBitRate,
1013 CFRelease(bit_rate_num);
1022 bytes_per_second_value = max_rate >> 3;
1023 bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1024 kCFNumberSInt64Type,
1025 &bytes_per_second_value);
1026 if (!bytes_per_second) {
1029 one_second_value = 1;
1030 one_second = CFNumberCreate(kCFAllocatorDefault,
1031 kCFNumberSInt64Type,
1034 CFRelease(bytes_per_second);
1037 nums[0] = (void *)bytes_per_second;
1038 nums[1] = (void *)one_second;
1039 data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1040 (const void **)nums,
1042 &kCFTypeArrayCallBacks);
1044 if (!data_rate_limits) {
1045 CFRelease(bytes_per_second);
1046 CFRelease(one_second);
1049 status = VTSessionSetProperty(vtctx->session,
1050 kVTCompressionPropertyKey_DataRateLimits,
1053 CFRelease(bytes_per_second);
1054 CFRelease(one_second);
1055 CFRelease(data_rate_limits);
1062 if (profile_level) {
1063 status = VTSessionSetProperty(vtctx->session,
1064 kVTCompressionPropertyKey_ProfileLevel,
1073 CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1080 status = VTSessionSetProperty(vtctx->session,
1081 kVTCompressionPropertyKey_MaxKeyFrameInterval,
1083 CFRelease(interval);
1086 av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1092 status = VTSessionSetProperty(vtctx->session,
1093 kVTCompressionPropertyKey_MoreFramesBeforeStart,
1096 if (status == kVTPropertyNotSupportedErr) {
1097 av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1098 } else if (status) {
1104 status = VTSessionSetProperty(vtctx->session,
1105 kVTCompressionPropertyKey_MoreFramesAfterEnd,
1108 if (status == kVTPropertyNotSupportedErr) {
1109 av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1110 } else if (status) {
1118 CFMutableDictionaryRef par;
1125 num = CFNumberCreate(kCFAllocatorDefault,
1129 den = CFNumberCreate(kCFAllocatorDefault,
1135 par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1137 &kCFCopyStringDictionaryKeyCallBacks,
1138 &kCFTypeDictionaryValueCallBacks);
1140 if (!par || !num || !den) {
1141 if (par) CFRelease(par);
1142 if (num) CFRelease(num);
1143 if (den) CFRelease(den);
1148 CFDictionarySetValue(
1150 kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1153 CFDictionarySetValue(
1155 kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1158 status = VTSessionSetProperty(vtctx->session,
1159 kVTCompressionPropertyKey_PixelAspectRatio,
1169 "Error setting pixel aspect ratio to %d:%d: %d.\n",
1180 status = VTSessionSetProperty(vtctx->session,
1181 kVTCompressionPropertyKey_TransferFunction,
1191 status = VTSessionSetProperty(vtctx->session,
1192 kVTCompressionPropertyKey_YCbCrMatrix,
1202 status = VTSessionSetProperty(vtctx->session,
1203 kVTCompressionPropertyKey_ColorPrimaries,
1212 status = VTSessionSetProperty(vtctx->session,
1213 kCVImageBufferGammaLevelKey,
1222 status = VTSessionSetProperty(vtctx->session,
1223 kVTCompressionPropertyKey_AllowFrameReordering,
1227 av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1237 status = VTSessionSetProperty(vtctx->session,
1238 compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1247 status = VTSessionSetProperty(vtctx->session,
1256 status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
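The session-setup code above (listing lines 965-1256) creates the VTCompressionSession, applies every configured property (bitrate, data-rate limits, profile/level, keyframe interval, colour metadata, entropy mode) and finally calls VTCompressionSessionPrepareToEncodeFrames(). A much-reduced sketch of that sequence, assuming H.264 and only a handful of properties; the callback and its context stand in for the file's output callback, and property errors are ignored for brevity where the real code checks and logs each one:

#include <VideoToolbox/VideoToolbox.h>

static OSStatus create_h264_session(int32_t width, int32_t height,
                                    int32_t bit_rate, int32_t gop_size,
                                    VTCompressionOutputCallback output_cb,
                                    void *cb_ctx,
                                    VTCompressionSessionRef *session_out)
{
    CFNumberRef bit_rate_num, interval_num;
    OSStatus status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                                 width, height,
                                                 kCMVideoCodecType_H264,
                                                 NULL,  /* encoder specification */
                                                 NULL,  /* source buffer attributes */
                                                 NULL,  /* compressed data allocator */
                                                 output_cb, cb_ctx, session_out);
    if (status || !*session_out)
        return status ? status : kVTInvalidSessionErr;

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bit_rate);
    interval_num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &gop_size);
    if (!bit_rate_num || !interval_num) {
        if (bit_rate_num) CFRelease(bit_rate_num);
        if (interval_num) CFRelease(interval_num);
        return kVTParameterErr;
    }

    VTSessionSetProperty(*session_out, kVTCompressionPropertyKey_AverageBitRate,
                         bit_rate_num);
    VTSessionSetProperty(*session_out, kVTCompressionPropertyKey_MaxKeyFrameInterval,
                         interval_num);
    VTSessionSetProperty(*session_out, kVTCompressionPropertyKey_RealTime,
                         kCFBooleanTrue);

    CFRelease(bit_rate_num);
    CFRelease(interval_num);

    return VTCompressionSessionPrepareToEncodeFrames(*session_out);
}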
1267 CFMutableDictionaryRef enc_info;
1268 CFMutableDictionaryRef pixel_buffer_info;
1271 CFStringRef profile_level;
1272 CFBooleanRef has_b_frames_cfbool;
1273 CFNumberRef gamma_level = NULL;
1291 av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1296 av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1309 enc_info = CFDictionaryCreateMutable(
1310 kCFAllocatorDefault,
1312 &kCFCopyStringDictionaryKeyCallBacks,
1313 &kCFTypeDictionaryValueCallBacks
1316 if (!enc_info) return AVERROR(ENOMEM);
1318 #if !TARGET_OS_IPHONE
1320 CFDictionarySetValue(enc_info,
1321 compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1324 CFDictionarySetValue(enc_info,
1325 compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1335 pixel_buffer_info = NULL;
1369 status = VTSessionCopyProperty(vtctx->session,
1370 kVTCompressionPropertyKey_AllowFrameReordering,
1371 kCFAllocatorDefault,
1372 &has_b_frames_cfbool);
1374 if (!status && has_b_frames_cfbool) {
1376 vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1377 CFRelease(has_b_frames_cfbool);
1383 CFRelease(gamma_level);
1385 if (pixel_buffer_info)
1386 CFRelease(pixel_buffer_info);
1388 CFRelease(enc_info);
1395 CFArrayRef attachments;
1396 CFDictionaryRef attachment;
1397 CFBooleanRef not_sync;
1400 attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1401 len = !attachments ? 0 : CFArrayGetCount(attachments);
1404 *is_key_frame = true;
1408 attachment = CFArrayGetValueAtIndex(attachments, 0);
1410 if (CFDictionaryGetValueIfPresent(attachment,
1411 kCMSampleAttachmentKey_NotSync,
1412 (const void **)&not_sync))
1414 *is_key_frame = !CFBooleanGetValue(not_sync);
1416 *is_key_frame = true;
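The keyframe check above inspects the sample buffer's first attachment dictionary: a sample is a sync sample (keyframe) unless kCMSampleAttachmentKey_NotSync is present and true. The same test in isolation, as a stand-alone sketch:

#include <stdbool.h>
#include <CoreMedia/CoreMedia.h>

static bool sample_is_key_frame(CMSampleBufferRef buffer)
{
    CFArrayRef attachments =
        CMSampleBufferGetSampleAttachmentsArray(buffer, false);
    CFDictionaryRef attachment;
    CFBooleanRef not_sync;

    /* No attachment array at all: treat the sample as a sync sample. */
    if (!attachments || !CFArrayGetCount(attachments))
        return true;

    attachment = CFArrayGetValueAtIndex(attachments, 0);
    if (CFDictionaryGetValueIfPresent(attachment, kCMSampleAttachmentKey_NotSync,
                                      (const void **)&not_sync))
        return !CFBooleanGetValue(not_sync);

    return true;
}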
1437 size_t sei_payload_size = 0;
1438 int sei_payload_type = 0;
1440 uint8_t *nal_start = nal_data;
1445 nal_type = *nal_data & 0x1F;
1452 if (nal_data[nal_size - 1] == 0x80)
1455 while (nal_size > 0 && *nal_data > 0) {
1457 sei_payload_type += *nal_data;
1460 } while (nal_size > 0 && *nal_data == 0xFF);
1468 sei_payload_size += *nal_data;
1471 } while (nal_size > 0 && *nal_data == 0xFF);
1473 if (nal_size < sei_payload_size) {
1478 nal_data += sei_payload_size;
1479 nal_size -= sei_payload_size;
1482 *sei_end = nal_data;
1484 return nal_data - nal_start + 1;
1504 uint8_t* dst_end = dst + dst_size;
1505 const uint8_t* src_end = src + src_size;
1506 int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1508 for (i = start_at; i < dst_offset && i < dst_size; i++) {
1517 for (; src < src_end; src++, dst++) {
1519 int insert_ep3_byte = *src <= 3;
1520 if (insert_ep3_byte) {
1538 wrote_bytes = dst - dst_start;
1541 return -wrote_bytes;
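The copy routine above (listing lines 1504-1541) re-inserts the H.264 emulation-prevention byte while writing SEI data into the Annex B stream: after two consecutive zero bytes, any byte in the range 0x00-0x03 must be preceded by 0x03. A simplified stand-alone sketch of that rule; the real function also accounts for bytes already written before dst_offset and signals overflow with a negative count:

#include <stddef.h>
#include <stdint.h>

/* Copy src into dst, inserting 0x03 after every run of two zero bytes that
 * would otherwise be followed by a byte <= 0x03. Returns bytes written,
 * or -1 if dst is too small. */
static ptrdiff_t copy_with_emulation_prevention(const uint8_t *src, size_t src_size,
                                                uint8_t *dst, size_t dst_size)
{
    uint8_t *dst_start = dst;
    const uint8_t *src_end = src + src_size;
    uint8_t *dst_end = dst + dst_size;
    int num_zeros = 0;

    for (; src < src_end; src++, dst++) {
        if (num_zeros == 2 && *src <= 3) {
            if (dst >= dst_end)
                return -1;
            *dst++ = 0x03;            /* emulation-prevention byte */
            num_zeros = 0;
        }
        if (dst >= dst_end)
            return -1;
        num_zeros = (*src == 0) ? num_zeros + 1 : 0;
        *dst = *src;
    }
    return dst - dst_start;
}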
1552 size_t remaining_sei_size = sei->size;
1553 size_t remaining_dst_size = dst_size;
1558 if (!remaining_dst_size)
1561 while (sei_type && remaining_dst_size != 0) {
1562 int sei_byte = sei_type > 255 ? 255 : sei_type;
1565 sei_type -= sei_byte;
1567 remaining_dst_size--;
1573 while (remaining_sei_size && remaining_dst_size != 0) {
1574 int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1577 remaining_sei_size -= size_byte;
1579 remaining_dst_size--;
1582 if (remaining_dst_size < sei->size)
1585 header_bytes = dst - sei_start;
1587 offset = header_bytes;
1593 if (bytes_written < 0)
1596 bytes_written += header_bytes;
1597 return bytes_written;
1621 size_t length_code_size,
1622 CMSampleBufferRef sample_buffer,
1627 size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1628 size_t remaining_src_size = src_size;
1629 size_t remaining_dst_size = dst_size;
1630 size_t src_offset = 0;
1635 CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1637 if (length_code_size > 4) {
1641 while (remaining_src_size > 0) {
1642 size_t curr_src_len;
1643 size_t curr_dst_len;
1649 status = CMBlockBufferCopyDataBytes(block,
1658 status = CMBlockBufferCopyDataBytes(block,
1659 src_offset + length_code_size,
1670 for (i = 0; i < length_code_size; i++) {
1672 box_len |= size_buf[i];
1685 remaining_dst_size--;
1690 remaining_dst_size);
1692 if (wrote_bytes < 0)
1695 remaining_dst_size -= wrote_bytes;
1696 dst_data += wrote_bytes;
1698 if (remaining_dst_size <= 0)
1704 remaining_dst_size--;
1709 curr_src_len = box_len + length_code_size;
1712 if (remaining_src_size < curr_src_len) {
1716 if (remaining_dst_size < curr_dst_len) {
1723 status = CMBlockBufferCopyDataBytes(block,
1724 src_offset + length_code_size,
1739 old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1740 if (old_sei_length < 0)
1746 remaining_dst_size - old_sei_length);
1747 if (wrote_bytes < 0)
1750 if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1753 new_sei[wrote_bytes++] = 0x80;
1754 extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1756 dst_data += extra_bytes;
1757 remaining_dst_size -= extra_bytes;
1762 src_offset += curr_src_len;
1763 dst_data += curr_dst_len;
1765 remaining_src_size -= curr_src_len;
1766 remaining_dst_size -= curr_dst_len;
1787 if ((sei->size % 255) == 0)
1790 return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1795 CMSampleBufferRef sample_buffer,
1804 size_t length_code_size;
1805 size_t header_size = 0;
1807 size_t out_buf_size;
1808 size_t sei_nalu_size = 0;
1810 int64_t time_base_num;
1814 CMVideoFormatDescriptionRef vid_fmt;
1819 if (status) return status;
1824 vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1831 if (status) return status;
1834 status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1842 sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1845 in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1846 out_buf_size = header_size +
1857 if(status) return status;
1865 pkt->data + header_size,
1866 pkt->size - header_size
1878 pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1879 dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
1881 if (CMTIME_IS_INVALID(dts)) {
1892 pkt->pts = pts.value / time_base_num;
1893 pkt->dts = dts.value / time_base_num - dts_delta;
1894 pkt->size = out_buf_size;
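The packet-assembly code above converts the sample buffer's CMTime stamps back into the encoder time base; an invalid decode timestamp means no reordering took place, so DTS can simply follow PTS. A hedged sketch of that conversion, ignoring the B-frame dts_delta adjustment the file applies, and assuming the session was fed CMTime values built from the same time base:

#include <stdint.h>
#include <CoreMedia/CoreMedia.h>

static void get_packet_timestamps(CMSampleBufferRef sample_buffer,
                                  int32_t time_base_num,
                                  int64_t *pts_out, int64_t *dts_out)
{
    CMTime pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    CMTime dts = CMSampleBufferGetDecodeTimeStamp(sample_buffer);

    if (CMTIME_IS_INVALID(dts))
        dts = pts;                    /* no reordering: DTS follows PTS */

    *pts_out = pts.value / time_base_num;
    *dts_out = dts.value / time_base_num;
}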
1911 size_t *contiguous_buf_size)
1914 int av_format = frame->format;
1924 "Could not get pixel format for color format '%s' range '%s'.\n",
1934 if (range_guessed) {
1939 "Color range not set for %s. Using MPEG range.\n",
1944 switch (av_format) {
1948 widths [0] = avctx->width;
1949 heights[0] = avctx->height;
1952 widths [1] = (avctx->width + 1) / 2;
1953 heights[1] = (avctx->height + 1) / 2;
1954 strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
1960 widths [0] = avctx->width;
1961 heights[0] = avctx->height;
1964 widths [1] = (avctx->width + 1) / 2;
1965 heights[1] = (avctx->height + 1) / 2;
1966 strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
1968 widths [2] = (avctx->width + 1) / 2;
1969 heights[2] = (avctx->height + 1) / 2;
1970 strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
1977 "Could not get frame format info for color %d range %d.\n",
1984 *contiguous_buf_size = 0;
1985 for (i = 0; i < *plane_count; i++) {
1986 if (i < *plane_count - 1 &&
1987 frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
1988 *contiguous_buf_size = 0;
1992 *contiguous_buf_size += strides[i] * heights[i];
1998 #if !TARGET_OS_IPHONE
2005 const void *plane_addresses[])
2014 CVPixelBufferRef cv_img,
2015 const size_t *plane_strides,
2016 const size_t *plane_rows)
2028 status = CVPixelBufferLockBaseAddress(cv_img, 0);
2033 "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2038 if (CVPixelBufferIsPlanar(cv_img)) {
2039 plane_count = CVPixelBufferGetPlaneCount(cv_img);
2040 for (i = 0; frame->data[i]; i++) {
2041 if (i == plane_count) {
2042 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2045 "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2051 dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2053 dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2054 src_stride = plane_strides[i];
2055 rows = plane_rows[i];
2057 if (dst_stride == src_stride) {
2058 memcpy(dst_addr, src_addr, src_stride * rows);
2060 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2062 for (j = 0; j < rows; j++) {
2063 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2068 if (frame->data[1]) {
2069 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2072 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2078 dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2080 dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2081 src_stride = plane_strides[0];
2082 rows = plane_rows[0];
2084 if (dst_stride == src_stride) {
2085 memcpy(dst_addr, src_addr, src_stride * rows);
2087 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2089 for (j = 0; j < rows; j++) {
2090 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2095 status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2097 av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2107 CVPixelBufferRef *cv_img)
2115 size_t contiguous_buf_size;
2116 #if TARGET_OS_IPHONE
2117 CVPixelBufferPoolRef pix_buf_pool;
2120 CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
2121 kCFAllocatorDefault,
2123 &kCFCopyStringDictionaryKeyCallBacks,
2124 &kCFTypeDictionaryValueCallBacks);
2126 if (!pix_buf_attachments) return AVERROR(ENOMEM);
2132 *cv_img = (CVPixelBufferRef)frame->data[3];
2139 memset(widths, 0, sizeof(widths));
2140 memset(heights, 0, sizeof(heights));
2141 memset(strides, 0, sizeof(strides));
2151 &contiguous_buf_size
2158 "Error: Cannot convert format %d color_range %d: %d\n",
2167 #if TARGET_OS_IPHONE
2168 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2169 if (!pix_buf_pool) {
2174 status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2180 av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2184 status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2192 if (!enc_frame) return AVERROR(ENOMEM);
2200 status = CVPixelBufferCreateWithPlanarBytes(
2201 kCFAllocatorDefault,
2206 contiguous_buf_size,
2208 (void **)enc_frame->data,
2219 CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
2220 CFRelease(pix_buf_attachments);
2232 CFDictionaryRef* dict_out)
2234 CFDictionaryRef dict = NULL;
2236 const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2237 const void *vals[] = { kCFBooleanTrue };
2239 dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2240 if(!dict) return AVERROR(ENOMEM);
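The dictionary built above carries kVTEncodeFrameOptionKey_ForceKeyFrame and is passed per frame to VTCompressionSessionEncodeFrame() so the encoder emits an IDR for exactly that frame. A minimal sketch of the full round trip, with duration and refcon handling reduced to the bare minimum (the helper name is illustrative):

#include <VideoToolbox/VideoToolbox.h>

static OSStatus encode_forced_keyframe(VTCompressionSessionRef session,
                                       CVPixelBufferRef image, CMTime pts)
{
    const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
    const void *vals[] = { kCFBooleanTrue };
    OSStatus status;

    CFDictionaryRef frame_props =
        CFDictionaryCreate(kCFAllocatorDefault, keys, vals, 1,
                           &kCFTypeDictionaryKeyCallBacks,
                           &kCFTypeDictionaryValueCallBacks);
    if (!frame_props)
        return kVTAllocationFailedErr;

    status = VTCompressionSessionEncodeFrame(session, image, pts,
                                             kCMTimeInvalid, /* duration */
                                             frame_props,    /* per-frame options */
                                             NULL,           /* source frame refcon */
                                             NULL);          /* info flags out */
    CFRelease(frame_props);
    return status;
}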
2252 CFDictionaryRef frame_dict;
2253 CVPixelBufferRef cv_img = NULL;
2258 if (status) return status;
2267 if (vtctx->a53_cc && side_data && side_data->size) {
2282 status = VTCompressionSessionEncodeFrame(
2292 if (frame_dict) CFRelease(frame_dict);
2333 status = VTCompressionSessionCompleteFrames(vtctx->session,
2351 if (status) goto end_nopkt;
2352 if (!buf) goto end_nopkt;
2360 if (status) goto end_nopkt;
2372 CFStringRef profile_level,
2373 CFNumberRef gamma_level,
2374 CFDictionaryRef enc_info,
2375 CFDictionaryRef pixel_buffer_info)
2380 int chroma_size = (avctx->width / 2) * (avctx->height / 2);
2405 memset(frame->data[0], 0, y_size);
2408 memset(frame->data[1], 128, chroma_size);
2412 frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
2413 memset(frame->data[2], 128, chroma_size);
2441 status = VTCompressionSessionCompleteFrames(vtctx->session,
2478 VTCompressionSessionCompleteFrames(vtctx->session,
2511 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2512 #define COMMON_OPTIONS \
2513 { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2514 { .i64 = 0 }, 0, 1, VE }, \
2515 { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2516 OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2517 { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2518 OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2519 { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2520 OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2522 #define OFFSET(x) offsetof(VTEncContext, x)
2530 { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2531 { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2532 { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2533 { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2534 { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2535 { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2536 { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2537 { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2538 { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2539 { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2561 .name = "h264_videotoolbox",
2566 .pix_fmts = pix_fmts,
2571 .priv_class = &h264_videotoolbox_class,
2593 .name = "hevc_videotoolbox",
2598 .pix_fmts = pix_fmts,
2603 .priv_class = &hevc_videotoolbox_class,
2606 .wrapper_name = "videotoolbox",