FFmpeg — libavcodec/videotoolbox.c (VideoToolbox hardware-accelerated decoding)
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "videotoolbox.h"
26 #include "vt_internal.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/pixdesc.h"
30 #include "bytestream.h"
31 #include "decode.h"
32 #include "h264dec.h"
33 #include "hevcdec.h"
34 #include "mpegvideo.h"
35 #include <TargetConditionals.h>
36 
/* Decoder-specification keys controlling hardware decode; declared here as
 * CFSTR fallbacks for SDKs that do not export them. */
#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif

/* FourCC fallback for SDKs lacking the HEVC codec-type constant. */
#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

/* Slack appended to the esds blob built in videotoolbox_esds_extradata_create(). */
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
49 
50 typedef struct VTHWFrame {
51  CVPixelBufferRef pixbuf;
53 } VTHWFrame;
54 
55 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
56 {
57  VTHWFrame *ref = (VTHWFrame *)data;
59  CVPixelBufferRelease(ref->pixbuf);
60 
61  av_free(data);
62 }
63 
65  const uint8_t *buffer,
66  uint32_t size)
67 {
68  void *tmp;
69 
70  tmp = av_fast_realloc(vtctx->bitstream,
71  &vtctx->allocated_size,
72  size);
73 
74  if (!tmp)
75  return AVERROR(ENOMEM);
76 
77  vtctx->bitstream = tmp;
78  memcpy(vtctx->bitstream, buffer, size);
79  vtctx->bitstream_size = size;
80 
81  return 0;
82 }
83 
84 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
85 {
86  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
87 
88  if (!ref->pixbuf) {
89  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
90  av_frame_unref(frame);
91  return AVERROR_EXTERNAL;
92  }
93 
94  frame->data[3] = (uint8_t*)ref->pixbuf;
95 
96  if (ref->hw_frames_ctx) {
99  if (!frame->hw_frames_ctx)
100  return AVERROR(ENOMEM);
101  }
102 
103  return 0;
104 }
105 
107 {
108  size_t size = sizeof(VTHWFrame);
109  uint8_t *data = NULL;
110  AVBufferRef *buf = NULL;
111  int ret = ff_attach_decode_data(frame);
112  FrameDecodeData *fdd;
113  if (ret < 0)
114  return ret;
115 
116  data = av_mallocz(size);
117  if (!data)
118  return AVERROR(ENOMEM);
119  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
120  if (!buf) {
121  av_freep(&data);
122  return AVERROR(ENOMEM);
123  }
124  frame->buf[0] = buf;
125 
126  fdd = (FrameDecodeData*)frame->private_ref->data;
128 
129  frame->width = avctx->width;
130  frame->height = avctx->height;
131  frame->format = avctx->pix_fmt;
132 
133  return 0;
134 }
135 
/* Store the byte v at address p. */
#define AV_W8(p, v) *(p) = (v)
137 
139 {
140  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
141  H264Context *h = avctx->priv_data;
142  CFDataRef data = NULL;
143  uint8_t *p;
144  int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
145  uint8_t *vt_extradata = av_malloc(vt_extradata_size);
146  if (!vt_extradata)
147  return NULL;
148 
149  p = vt_extradata;
150 
151  AV_W8(p + 0, 1); /* version */
152  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
153  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
154  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
155  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
156  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
157  AV_WB16(p + 6, h->ps.sps->data_size);
158  memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
159  p += 8 + h->ps.sps->data_size;
160  AV_W8(p + 0, 1); /* number of pps */
161  AV_WB16(p + 1, h->ps.pps->data_size);
162  memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
163 
164  p += 3 + h->ps.pps->data_size;
165  av_assert0(p - vt_extradata == vt_extradata_size);
166 
167  // save sps header (profile/level) used to create decoder session,
168  // so we can detect changes and recreate it.
169  if (vtctx)
170  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
171 
172  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
173  av_free(vt_extradata);
174  return data;
175 }
176 
178 {
179  HEVCContext *h = avctx->priv_data;
180  int i, num_vps = 0, num_sps = 0, num_pps = 0;
181  const HEVCVPS *vps = h->ps.vps;
182  const HEVCSPS *sps = h->ps.sps;
183  const HEVCPPS *pps = h->ps.pps;
184  PTLCommon ptlc = vps->ptl.general_ptl;
185  VUI vui = sps->vui;
186  uint8_t parallelismType;
187  CFDataRef data = NULL;
188  uint8_t *p;
189  int vt_extradata_size = 23 + 3 + 3 + 3;
190  uint8_t *vt_extradata;
191 
192 #define COUNT_SIZE_PS(T, t) \
193  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
194  if (h->ps.t##ps_list[i]) { \
195  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
196  vt_extradata_size += 2 + lps->data_size; \
197  num_##t##ps++; \
198  } \
199  }
200 
201  COUNT_SIZE_PS(V, v)
202  COUNT_SIZE_PS(S, s)
203  COUNT_SIZE_PS(P, p)
204 
205  vt_extradata = av_malloc(vt_extradata_size);
206  if (!vt_extradata)
207  return NULL;
208  p = vt_extradata;
209 
210  /* unsigned int(8) configurationVersion = 1; */
211  AV_W8(p + 0, 1);
212 
213  /*
214  * unsigned int(2) general_profile_space;
215  * unsigned int(1) general_tier_flag;
216  * unsigned int(5) general_profile_idc;
217  */
218  AV_W8(p + 1, ptlc.profile_space << 6 |
219  ptlc.tier_flag << 5 |
220  ptlc.profile_idc);
221 
222  /* unsigned int(32) general_profile_compatibility_flags; */
223  memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
224 
225  /* unsigned int(48) general_constraint_indicator_flags; */
226  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
227  ptlc.interlaced_source_flag << 6 |
228  ptlc.non_packed_constraint_flag << 5 |
229  ptlc.frame_only_constraint_flag << 4);
230  AV_W8(p + 7, 0);
231  AV_WN32(p + 8, 0);
232 
233  /* unsigned int(8) general_level_idc; */
234  AV_W8(p + 12, ptlc.level_idc);
235 
236  /*
237  * bit(4) reserved = ‘1111’b;
238  * unsigned int(12) min_spatial_segmentation_idc;
239  */
240  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
241  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
242 
243  /*
244  * bit(6) reserved = ‘111111’b;
245  * unsigned int(2) parallelismType;
246  */
248  parallelismType = 0;
250  parallelismType = 0;
251  else if (pps->entropy_coding_sync_enabled_flag)
252  parallelismType = 3;
253  else if (pps->tiles_enabled_flag)
254  parallelismType = 2;
255  else
256  parallelismType = 1;
257  AV_W8(p + 15, 0xfc | parallelismType);
258 
259  /*
260  * bit(6) reserved = ‘111111’b;
261  * unsigned int(2) chromaFormat;
262  */
263  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
264 
265  /*
266  * bit(5) reserved = ‘11111’b;
267  * unsigned int(3) bitDepthLumaMinus8;
268  */
269  AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
270 
271  /*
272  * bit(5) reserved = ‘11111’b;
273  * unsigned int(3) bitDepthChromaMinus8;
274  */
275  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
276 
277  /* bit(16) avgFrameRate; */
278  AV_WB16(p + 19, 0);
279 
280  /*
281  * bit(2) constantFrameRate;
282  * bit(3) numTemporalLayers;
283  * bit(1) temporalIdNested;
284  * unsigned int(2) lengthSizeMinusOne;
285  */
286  AV_W8(p + 21, 0 << 6 |
287  sps->max_sub_layers << 3 |
288  sps->temporal_id_nesting_flag << 2 |
289  3);
290 
291  /* unsigned int(8) numOfArrays; */
292  AV_W8(p + 22, 3);
293 
294  p += 23;
295 
296 #define APPEND_PS(T, t) \
297  /* \
298  * bit(1) array_completeness; \
299  * unsigned int(1) reserved = 0; \
300  * unsigned int(6) NAL_unit_type; \
301  */ \
302  AV_W8(p, 1 << 7 | \
303  HEVC_NAL_##T##PS & 0x3f); \
304  /* unsigned int(16) numNalus; */ \
305  AV_WB16(p + 1, num_##t##ps); \
306  p += 3; \
307  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
308  if (h->ps.t##ps_list[i]) { \
309  const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
310  /* unsigned int(16) nalUnitLength; */ \
311  AV_WB16(p, lps->data_size); \
312  /* bit(8*nalUnitLength) nalUnit; */ \
313  memcpy(p + 2, lps->data, lps->data_size); \
314  p += 2 + lps->data_size; \
315  } \
316  }
317 
318  APPEND_PS(V, v)
319  APPEND_PS(S, s)
320  APPEND_PS(P, p)
321 
322  av_assert0(p - vt_extradata == vt_extradata_size);
323 
324  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
325  av_free(vt_extradata);
326  return data;
327 }
328 
330  const uint8_t *buffer,
331  uint32_t size)
332 {
333  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
334  H264Context *h = avctx->priv_data;
335 
336  if (h->is_avc == 1) {
337  return videotoolbox_buffer_copy(vtctx, buffer, size);
338  }
339 
340  return 0;
341 }
342 
344  int type,
345  const uint8_t *buffer,
346  uint32_t size)
347 {
348  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
349  H264Context *h = avctx->priv_data;
350 
351  // save sps header (profile/level) used to create decoder session
352  if (!vtctx->sps[0])
353  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
354 
355  if (type == H264_NAL_SPS) {
356  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
357  vtctx->reconfig_needed = true;
358  memcpy(vtctx->sps, buffer + 1, 3);
359  }
360  }
361 
362  // pass-through SPS/PPS changes to the decoder
363  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
364 }
365 
367  const uint8_t *buffer,
368  uint32_t size)
369 {
370  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
371  void *tmp;
372 
373  tmp = av_fast_realloc(vtctx->bitstream,
374  &vtctx->allocated_size,
375  vtctx->bitstream_size+size+4);
376  if (!tmp)
377  return AVERROR(ENOMEM);
378 
379  vtctx->bitstream = tmp;
380 
381  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
382  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
383 
384  vtctx->bitstream_size += size + 4;
385 
386  return 0;
387 }
388 
390  const uint8_t *buffer,
391  uint32_t size)
392 {
393  H264Context *h = avctx->priv_data;
394 
395  if (h->is_avc == 1)
396  return 0;
397 
398  return videotoolbox_common_decode_slice(avctx, buffer, size);
399 }
400 
402 {
403  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
404  if (vtctx) {
405  av_freep(&vtctx->bitstream);
406  if (vtctx->frame)
407  CVPixelBufferRelease(vtctx->frame);
408  }
409 
410  return 0;
411 }
412 
413 #if CONFIG_VIDEOTOOLBOX
414 // Return the AVVideotoolboxContext that matters currently. Where it comes from
415 // depends on the API used.
416 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
417 {
418  // Somewhat tricky because the user can call av_videotoolbox_default_free()
419  // at any time, even when the codec is closed.
420  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
421  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
422  if (vtctx->vt_ctx)
423  return vtctx->vt_ctx;
424  }
425  return avctx->hwaccel_context;
426 }
427 
428 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
429 {
430  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
431  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
432  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
433  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
434  int width = CVPixelBufferGetWidth(pixbuf);
435  int height = CVPixelBufferGetHeight(pixbuf);
436  AVHWFramesContext *cached_frames;
437  VTHWFrame *ref;
438  int ret;
439 
440  if (!frame->buf[0] || frame->data[3]) {
441  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
442  av_frame_unref(frame);
443  return AVERROR_EXTERNAL;
444  }
445 
446  ref = (VTHWFrame *)frame->buf[0]->data;
447 
448  if (ref->pixbuf)
449  CVPixelBufferRelease(ref->pixbuf);
450  ref->pixbuf = vtctx->frame;
451  vtctx->frame = NULL;
452 
453  // Old API code path.
454  if (!vtctx->cached_hw_frames_ctx)
455  return 0;
456 
457  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
458 
459  if (cached_frames->sw_format != sw_format ||
460  cached_frames->width != width ||
461  cached_frames->height != height) {
463  AVHWFramesContext *hw_frames;
464  if (!hw_frames_ctx)
465  return AVERROR(ENOMEM);
466 
467  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
468  hw_frames->format = cached_frames->format;
469  hw_frames->sw_format = sw_format;
470  hw_frames->width = width;
471  hw_frames->height = height;
472 
473  ret = av_hwframe_ctx_init(hw_frames_ctx);
474  if (ret < 0) {
475  av_buffer_unref(&hw_frames_ctx);
476  return ret;
477  }
478 
481  }
482 
485  if (!ref->hw_frames_ctx)
486  return AVERROR(ENOMEM);
487 
488  return 0;
489 }
490 
491 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
492 {
493  int i;
494  uint8_t b;
495 
496  for (i = 3; i >= 0; i--) {
497  b = (length >> (i * 7)) & 0x7F;
498  if (i != 0)
499  b |= 0x80;
500 
501  bytestream2_put_byteu(pb, b);
502  }
503 }
504 
505 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
506 {
507  CFDataRef data;
508  uint8_t *rw_extradata;
509  PutByteContext pb;
510  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
511  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
512  int config_size = 13 + 5 + avctx->extradata_size;
513  int s;
514 
515  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
516  return NULL;
517 
518  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
519  bytestream2_put_byteu(&pb, 0); // version
520  bytestream2_put_ne24(&pb, 0); // flags
521 
522  // elementary stream descriptor
523  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
524  videotoolbox_write_mp4_descr_length(&pb, full_size);
525  bytestream2_put_ne16(&pb, 0); // esid
526  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
527 
528  // decoder configuration descriptor
529  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
530  videotoolbox_write_mp4_descr_length(&pb, config_size);
531  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
532  bytestream2_put_byteu(&pb, 0x11); // stream type
533  bytestream2_put_ne24(&pb, 0); // buffer size
534  bytestream2_put_ne32(&pb, 0); // max bitrate
535  bytestream2_put_ne32(&pb, 0); // avg bitrate
536 
537  // decoder specific descriptor
538  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
539  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
540 
541  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
542 
543  // SLConfigDescriptor
544  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
545  bytestream2_put_byteu(&pb, 0x01); // length
546  bytestream2_put_byteu(&pb, 0x02); //
547 
548  s = bytestream2_size_p(&pb);
549 
550  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
551 
552  av_freep(&rw_extradata);
553  return data;
554 }
555 
556 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
557  void *buffer,
558  int size)
559 {
560  OSStatus status;
561  CMBlockBufferRef block_buf;
562  CMSampleBufferRef sample_buf;
563 
564  block_buf = NULL;
565  sample_buf = NULL;
566 
567  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
568  buffer, // memoryBlock
569  size, // blockLength
570  kCFAllocatorNull, // blockAllocator
571  NULL, // customBlockSource
572  0, // offsetToData
573  size, // dataLength
574  0, // flags
575  &block_buf);
576 
577  if (!status) {
578  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
579  block_buf, // dataBuffer
580  TRUE, // dataReady
581  0, // makeDataReadyCallback
582  0, // makeDataReadyRefcon
583  fmt_desc, // formatDescription
584  1, // numSamples
585  0, // numSampleTimingEntries
586  NULL, // sampleTimingArray
587  0, // numSampleSizeEntries
588  NULL, // sampleSizeArray
589  &sample_buf);
590  }
591 
592  if (block_buf)
593  CFRelease(block_buf);
594 
595  return sample_buf;
596 }
597 
598 static void videotoolbox_decoder_callback(void *opaque,
599  void *sourceFrameRefCon,
600  OSStatus status,
601  VTDecodeInfoFlags flags,
602  CVImageBufferRef image_buffer,
603  CMTime pts,
604  CMTime duration)
605 {
606  AVCodecContext *avctx = opaque;
607  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
608 
609  if (vtctx->frame) {
610  CVPixelBufferRelease(vtctx->frame);
611  vtctx->frame = NULL;
612  }
613 
614  if (!image_buffer) {
615  av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
616  return;
617  }
618 
619  vtctx->frame = CVPixelBufferRetain(image_buffer);
620 }
621 
622 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
623 {
624  OSStatus status;
625  CMSampleBufferRef sample_buf;
626  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
627  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
628 
629  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
630  vtctx->bitstream,
631  vtctx->bitstream_size);
632 
633  if (!sample_buf)
634  return -1;
635 
636  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
637  sample_buf,
638  0, // decodeFlags
639  NULL, // sourceFrameRefCon
640  0); // infoFlagsOut
641  if (status == noErr)
642  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
643 
644  CFRelease(sample_buf);
645 
646  return status;
647 }
648 
649 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
650  CFDictionaryRef decoder_spec,
651  int width,
652  int height)
653 {
654  CMFormatDescriptionRef cm_fmt_desc;
655  OSStatus status;
656 
657  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
658  codec_type,
659  width,
660  height,
661  decoder_spec, // Dictionary of extension
662  &cm_fmt_desc);
663 
664  if (status)
665  return NULL;
666 
667  return cm_fmt_desc;
668 }
669 
670 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
671  int height,
672  OSType pix_fmt)
673 {
674  CFMutableDictionaryRef buffer_attributes;
675  CFMutableDictionaryRef io_surface_properties;
676  CFNumberRef cv_pix_fmt;
677  CFNumberRef w;
678  CFNumberRef h;
679 
680  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
681  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
682  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
683 
684  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
685  4,
686  &kCFTypeDictionaryKeyCallBacks,
687  &kCFTypeDictionaryValueCallBacks);
688  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
689  0,
690  &kCFTypeDictionaryKeyCallBacks,
691  &kCFTypeDictionaryValueCallBacks);
692 
693  if (pix_fmt)
694  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
695  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
696  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
697  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
698 #if TARGET_OS_IPHONE
699  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
700 #else
701  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
702 #endif
703 
704  CFRelease(io_surface_properties);
705  CFRelease(cv_pix_fmt);
706  CFRelease(w);
707  CFRelease(h);
708 
709  return buffer_attributes;
710 }
711 
712 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
713  AVCodecContext *avctx)
714 {
715  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
716  0,
717  &kCFTypeDictionaryKeyCallBacks,
718  &kCFTypeDictionaryValueCallBacks);
719 
720  CFDictionarySetValue(config_info,
724  kCFBooleanTrue);
725 
726  CFMutableDictionaryRef avc_info;
727  CFDataRef data = NULL;
728 
729  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
730  1,
731  &kCFTypeDictionaryKeyCallBacks,
732  &kCFTypeDictionaryValueCallBacks);
733 
734  switch (codec_type) {
735  case kCMVideoCodecType_MPEG4Video :
736  if (avctx->extradata_size)
737  data = videotoolbox_esds_extradata_create(avctx);
738  if (data)
739  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
740  break;
741  case kCMVideoCodecType_H264 :
743  if (data)
744  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
745  break;
748  if (data)
749  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
750  break;
751  default:
752  break;
753  }
754 
755  CFDictionarySetValue(config_info,
756  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
757  avc_info);
758 
759  if (data)
760  CFRelease(data);
761 
762  CFRelease(avc_info);
763  return config_info;
764 }
765 
766 static int videotoolbox_start(AVCodecContext *avctx)
767 {
768  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
769  OSStatus status;
770  VTDecompressionOutputCallbackRecord decoder_cb;
771  CFDictionaryRef decoder_spec;
772  CFDictionaryRef buf_attr;
773 
774  if (!videotoolbox) {
775  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
776  return -1;
777  }
778 
779  switch( avctx->codec_id ) {
780  case AV_CODEC_ID_H263 :
781  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
782  break;
783  case AV_CODEC_ID_H264 :
784  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
785  break;
786  case AV_CODEC_ID_HEVC :
787  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
788  break;
790  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
791  break;
793  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
794  break;
795  case AV_CODEC_ID_MPEG4 :
796  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
797  break;
798  default :
799  break;
800  }
801 
802  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
803 
804  if (!decoder_spec) {
805  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
806  return -1;
807  }
808 
809  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
810  decoder_spec,
811  avctx->width,
812  avctx->height);
813  if (!videotoolbox->cm_fmt_desc) {
814  if (decoder_spec)
815  CFRelease(decoder_spec);
816 
817  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
818  return -1;
819  }
820 
821  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
822  avctx->height,
823  videotoolbox->cv_pix_fmt_type);
824 
825  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
826  decoder_cb.decompressionOutputRefCon = avctx;
827 
828  status = VTDecompressionSessionCreate(NULL, // allocator
829  videotoolbox->cm_fmt_desc, // videoFormatDescription
830  decoder_spec, // videoDecoderSpecification
831  buf_attr, // destinationImageBufferAttributes
832  &decoder_cb, // outputCallback
833  &videotoolbox->session); // decompressionSessionOut
834 
835  if (decoder_spec)
836  CFRelease(decoder_spec);
837  if (buf_attr)
838  CFRelease(buf_attr);
839 
840  switch (status) {
841  case kVTVideoDecoderNotAvailableNowErr:
842  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
843  return AVERROR(ENOSYS);
844  case kVTVideoDecoderUnsupportedDataFormatErr:
845  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
846  return AVERROR(ENOSYS);
847  case kVTCouldNotFindVideoDecoderErr:
848  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
849  return AVERROR(ENOSYS);
850  case kVTVideoDecoderMalfunctionErr:
851  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
852  return AVERROR(EINVAL);
853  case kVTVideoDecoderBadDataErr:
854  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
855  return AVERROR_INVALIDDATA;
856  case 0:
857  return 0;
858  default:
859  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
860  return AVERROR_UNKNOWN;
861  }
862 }
863 
864 static void videotoolbox_stop(AVCodecContext *avctx)
865 {
866  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
867  if (!videotoolbox)
868  return;
869 
870  if (videotoolbox->cm_fmt_desc) {
871  CFRelease(videotoolbox->cm_fmt_desc);
872  videotoolbox->cm_fmt_desc = NULL;
873  }
874 
875  if (videotoolbox->session) {
876  VTDecompressionSessionInvalidate(videotoolbox->session);
877  CFRelease(videotoolbox->session);
878  videotoolbox->session = NULL;
879  }
880 }
881 
882 static const char *videotoolbox_error_string(OSStatus status)
883 {
884  switch (status) {
885  case kVTVideoDecoderBadDataErr:
886  return "bad data";
887  case kVTVideoDecoderMalfunctionErr:
888  return "decoder malfunction";
889  case kVTInvalidSessionErr:
890  return "invalid session";
891  }
892  return "unknown";
893 }
894 
895 static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
896 {
897  OSStatus status;
898  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
899  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
900 
901  frame->crop_right = 0;
902  frame->crop_left = 0;
903  frame->crop_top = 0;
904  frame->crop_bottom = 0;
905 
906  if (vtctx->reconfig_needed == true) {
907  vtctx->reconfig_needed = false;
908  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
909  videotoolbox_stop(avctx);
910  if (videotoolbox_start(avctx) != 0) {
911  return AVERROR_EXTERNAL;
912  }
913  }
914 
915  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
916  return AVERROR_INVALIDDATA;
917 
918  status = videotoolbox_session_decode_frame(avctx);
919  if (status != noErr) {
920  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
921  vtctx->reconfig_needed = true;
922  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
923  return AVERROR_UNKNOWN;
924  }
925 
926  if (!vtctx->frame) {
927  vtctx->reconfig_needed = true;
928  return AVERROR_UNKNOWN;
929  }
930 
931  return videotoolbox_buffer_create(avctx, frame);
932 }
933 
934 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
935 {
936  H264Context *h = avctx->priv_data;
937  AVFrame *frame = h->cur_pic_ptr->f;
938  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
939  int ret = videotoolbox_common_end_frame(avctx, frame);
940  vtctx->bitstream_size = 0;
941  return ret;
942 }
943 
/* HEVC start_frame: nothing to do — all NALs (params and slices) are
 * accumulated through the decode_slice/decode_params path. */
static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    return 0;
}
950 
/* HEVC decode_slice: append the NAL with length-prefix framing. */
static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return videotoolbox_common_decode_slice(avctx, buffer, size);
}
957 
958 
/* HEVC decode_params: parameter-set NALs are passed straight into the
 * bitstream like slices; VideoToolbox picks them up from there. */
static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    return videotoolbox_common_decode_slice(avctx, buffer, size);
}
966 
967 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
968 {
969  HEVCContext *h = avctx->priv_data;
970  AVFrame *frame = h->ref->frame;
971  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
972 
973  h->output_frame->crop_right = 0;
974  h->output_frame->crop_left = 0;
975  h->output_frame->crop_top = 0;
976  h->output_frame->crop_bottom = 0;
977 
978  int ret = videotoolbox_common_end_frame(avctx, frame);
979  vtctx->bitstream_size = 0;
980  return ret;
981 }
982 
/* MPEG-1/2/4 and H.263 start_frame: the full picture arrives in one buffer,
 * so copy it into the bitstream in one go. */
static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}
991 
/* MPEG decode_slice: nothing to do — the whole picture was already copied
 * in videotoolbox_mpeg_start_frame(). */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
998 
999 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1000 {
1001  MpegEncContext *s = avctx->priv_data;
1002  AVFrame *frame = s->current_picture_ptr->f;
1003 
1004  return videotoolbox_common_end_frame(avctx, frame);
1005 }
1006 
1007 static int videotoolbox_uninit(AVCodecContext *avctx)
1008 {
1009  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1010  if (!vtctx)
1011  return 0;
1012 
1013  ff_videotoolbox_uninit(avctx);
1014 
1015  if (vtctx->vt_ctx)
1016  videotoolbox_stop(avctx);
1017 
1019  av_freep(&vtctx->vt_ctx);
1020 
1021  return 0;
1022 }
1023 
1024 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1025  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->pix_fmt);
1026  if (!descriptor)
1027  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1028 
1029  int depth = descriptor->comp[0].depth;
1030  if (depth > 8) {
1031  return AV_PIX_FMT_P010;
1032  }
1033 
1034  return AV_PIX_FMT_NV12;
1035 }
1036 
1037 static int videotoolbox_common_init(AVCodecContext *avctx)
1038 {
1039  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1040  AVHWFramesContext *hw_frames;
1041  int err;
1042 
1043  // Old API - do nothing.
1044  if (avctx->hwaccel_context)
1045  return 0;
1046 
1047  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1048  av_log(avctx, AV_LOG_ERROR,
1049  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1050  return AVERROR(EINVAL);
1051  }
1052 
1054  if (!vtctx->vt_ctx) {
1055  err = AVERROR(ENOMEM);
1056  goto fail;
1057  }
1058 
1059  if (avctx->hw_frames_ctx) {
1060  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1061  } else {
1063  if (!avctx->hw_frames_ctx) {
1064  err = AVERROR(ENOMEM);
1065  goto fail;
1066  }
1067 
1068  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1069  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1070  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1071  hw_frames->width = avctx->width;
1072  hw_frames->height = avctx->height;
1073 
1074  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1075  if (err < 0) {
1076  av_buffer_unref(&avctx->hw_frames_ctx);
1077  goto fail;
1078  }
1079  }
1080 
1082  if (!vtctx->cached_hw_frames_ctx) {
1083  err = AVERROR(ENOMEM);
1084  goto fail;
1085  }
1086 
1087  vtctx->vt_ctx->cv_pix_fmt_type =
1089  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1090  av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
1091  err = AVERROR(EINVAL);
1092  goto fail;
1093  }
1094 
1095  err = videotoolbox_start(avctx);
1096  if (err < 0)
1097  goto fail;
1098 
1099  return 0;
1100 
1101 fail:
1102  videotoolbox_uninit(avctx);
1103  return err;
1104 }
1105 
1106 static int videotoolbox_frame_params(AVCodecContext *avctx,
1108 {
1109  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1110 
1111  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1112  frames_ctx->width = avctx->coded_width;
1113  frames_ctx->height = avctx->coded_height;
1114  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1115 
1116  return 0;
1117 }
1118 
1120  .name = "h263_videotoolbox",
1121  .type = AVMEDIA_TYPE_VIDEO,
1122  .id = AV_CODEC_ID_H263,
1123  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1124  .alloc_frame = ff_videotoolbox_alloc_frame,
1125  .start_frame = videotoolbox_mpeg_start_frame,
1126  .decode_slice = videotoolbox_mpeg_decode_slice,
1127  .end_frame = videotoolbox_mpeg_end_frame,
1128  .frame_params = videotoolbox_frame_params,
1129  .init = videotoolbox_common_init,
1130  .uninit = videotoolbox_uninit,
1131  .priv_data_size = sizeof(VTContext),
1132 };
1133 
1135  .name = "hevc_videotoolbox",
1136  .type = AVMEDIA_TYPE_VIDEO,
1137  .id = AV_CODEC_ID_HEVC,
1138  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1139  .alloc_frame = ff_videotoolbox_alloc_frame,
1140  .start_frame = videotoolbox_hevc_start_frame,
1141  .decode_slice = videotoolbox_hevc_decode_slice,
1142  .decode_params = videotoolbox_hevc_decode_params,
1143  .end_frame = videotoolbox_hevc_end_frame,
1144  .frame_params = videotoolbox_frame_params,
1145  .init = videotoolbox_common_init,
1146  .uninit = ff_videotoolbox_uninit,
1147  .priv_data_size = sizeof(VTContext),
1148 };
1149 
1151  .name = "h264_videotoolbox",
1152  .type = AVMEDIA_TYPE_VIDEO,
1153  .id = AV_CODEC_ID_H264,
1154  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1155  .alloc_frame = ff_videotoolbox_alloc_frame,
1156  .start_frame = ff_videotoolbox_h264_start_frame,
1157  .decode_slice = ff_videotoolbox_h264_decode_slice,
1158  .decode_params = videotoolbox_h264_decode_params,
1159  .end_frame = videotoolbox_h264_end_frame,
1160  .frame_params = videotoolbox_frame_params,
1161  .init = videotoolbox_common_init,
1162  .uninit = videotoolbox_uninit,
1163  .priv_data_size = sizeof(VTContext),
1164 };
1165 
1167  .name = "mpeg1_videotoolbox",
1168  .type = AVMEDIA_TYPE_VIDEO,
1169  .id = AV_CODEC_ID_MPEG1VIDEO,
1170  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1171  .alloc_frame = ff_videotoolbox_alloc_frame,
1172  .start_frame = videotoolbox_mpeg_start_frame,
1173  .decode_slice = videotoolbox_mpeg_decode_slice,
1174  .end_frame = videotoolbox_mpeg_end_frame,
1175  .frame_params = videotoolbox_frame_params,
1176  .init = videotoolbox_common_init,
1177  .uninit = videotoolbox_uninit,
1178  .priv_data_size = sizeof(VTContext),
1179 };
1180 
1182  .name = "mpeg2_videotoolbox",
1183  .type = AVMEDIA_TYPE_VIDEO,
1184  .id = AV_CODEC_ID_MPEG2VIDEO,
1185  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1186  .alloc_frame = ff_videotoolbox_alloc_frame,
1187  .start_frame = videotoolbox_mpeg_start_frame,
1188  .decode_slice = videotoolbox_mpeg_decode_slice,
1189  .end_frame = videotoolbox_mpeg_end_frame,
1190  .frame_params = videotoolbox_frame_params,
1191  .init = videotoolbox_common_init,
1192  .uninit = videotoolbox_uninit,
1193  .priv_data_size = sizeof(VTContext),
1194 };
1195 
1197  .name = "mpeg4_videotoolbox",
1198  .type = AVMEDIA_TYPE_VIDEO,
1199  .id = AV_CODEC_ID_MPEG4,
1200  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1201  .alloc_frame = ff_videotoolbox_alloc_frame,
1202  .start_frame = videotoolbox_mpeg_start_frame,
1203  .decode_slice = videotoolbox_mpeg_decode_slice,
1204  .end_frame = videotoolbox_mpeg_end_frame,
1205  .frame_params = videotoolbox_frame_params,
1206  .init = videotoolbox_common_init,
1207  .uninit = videotoolbox_uninit,
1208  .priv_data_size = sizeof(VTContext),
1209 };
1210 
1211 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt)
1212 {
1213  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1214 
1215  if (ret) {
1216  ret->output_callback = videotoolbox_decoder_callback;
1217 
1218  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt(pix_fmt);
1219  if (cv_pix_fmt_type == 0) {
1220  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1221  }
1222  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1223  }
1224 
1225  return ret;
1226 }
1227 
1229 {
1230  return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE);
1231 }
1232 
1234 {
1235  return av_videotoolbox_default_init2(avctx, NULL);
1236 }
1237 
1239 {
1240  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(videotoolbox_best_pixel_format(avctx));
1241  if (!avctx->hwaccel_context)
1242  return AVERROR(ENOMEM);
1243  return videotoolbox_start(avctx);
1244 }
1245 
1247 {
1248 
1249  videotoolbox_stop(avctx);
1250  av_freep(&avctx->hwaccel_context);
1251 }
1252 #endif /* CONFIG_VIDEOTOOLBOX */
#define COUNT_SIZE_PS(T, t)
const HEVCPPS * pps
Definition: hevc_ps.h:407
AVFrame * frame
Definition: hevcdec.h:312
#define NULL
Definition: coverity.c:32
int min_spatial_segmentation_idc
Definition: hevc_ps.h:168
#define P
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
HEVCFrame * ref
Definition: hevcdec.h:423
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1753
uint8_t * bitstream
Definition: vt_internal.h:24
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:76
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:459
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
static void videotoolbox_uninit(AVCodecContext *s)
VUI vui
Definition: hevc_ps.h:250
AVVideotoolboxContext * av_videotoolbox_alloc_context(void)
Allocate and initialize a Videotoolbox context.
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:143
uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt)
Convert an AVPixelFormat to a VideoToolbox (actually CoreVideo) format.
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:228
enum AVMediaType codec_type
Definition: rtp.c:37
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
Convenience header that includes libavutil&#39;s core.
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:450
GLint GLenum type
Definition: opengl_enc.c:104
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:208
mpegvideo header.
HEVCParamSets ps
Definition: hevcdec.h:408
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
H264Context.
Definition: h264dec.h:337
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:329
AVFrame * f
Definition: h264dec.h:129
size_t crop_bottom
Definition: frame.h:627
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:41
const AVHWAccel ff_h264_videotoolbox_hwaccel
uint8_t entropy_coding_sync_enabled_flag
Definition: hevc_ps.h:348
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:605
#define AV_PIX_FMT_P010
Definition: pixfmt.h:436
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int chroma_format_idc
Definition: hevc_ps.h:227
#define AV_W8(p, v)
Definition: videotoolbox.c:136
const PPS * pps
Definition: h264_ps.h:145
uint8_t profile_compatibility_flag[32]
Definition: hevc_ps.h:179
Definition: hevc_ps.h:132
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_malloc(s)
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1882
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:2737
size_t crop_left
Definition: frame.h:628
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:645
An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX.
AVFrame * output_frame
Definition: hevcdec.h:404
const HEVCVPS * vps
Definition: hevc_ps.h:405
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
int64_t duration
Definition: movenc.c:63
uint8_t tier_flag
Definition: hevc_ps.h:177
#define height
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:138
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
int av_videotoolbox_default_init(AVCodecContext *avctx)
This is a convenience function that creates and sets up the Videotoolbox context using an internal im...
ptrdiff_t size
Definition: opengl_enc.c:100
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
#define av_log(a,...)
static int videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:64
int bit_depth_chroma
Definition: hevc_ps.h:235
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int width
Definition: frame.h:326
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
uint8_t frame_only_constraint_flag
Definition: hevc_ps.h:184
const AVHWAccel ff_mpeg2_videotoolbox_hwaccel
#define S(s, c, i)
uint8_t temporal_id_nesting_flag
Definition: hevc_ps.h:248
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int allocated_size
Definition: vt_internal.h:30
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
GLsizei GLsizei * length
Definition: opengl_enc.c:114
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:51
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:329
#define fail()
Definition: checkasm.h:120
uint8_t tiles_enabled_flag
Definition: hevc_ps.h:347
const AVHWAccel ff_hevc_videotoolbox_hwaccel
size_t crop_top
Definition: frame.h:626
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:177
const HEVCSPS * sps
Definition: hevc_ps.h:406
size_t data_size
Definition: h264_ps.h:103
uint8_t profile_idc
Definition: hevc_ps.h:178
#define b
Definition: input.c:41
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3651
uint8_t data[4096]
Definition: h264_ps.h:129
#define width
int width
picture width / height.
Definition: avcodec.h:1738
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3258
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
#define TRUE
Definition: windows2linux.h:33
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
H.264 / AVC / MPEG-4 part10 codec.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:476
int bitstream_size
Definition: vt_internal.h:27
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:282
PTLCommon general_ptl
Definition: hevc_ps.h:188
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:220
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:51
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
This is a convenience function that creates and sets up the Videotoolbox context using an internal im...
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:38
if(ret)
PTL ptl
Definition: hevc_ps.h:200
bool reconfig_needed
Definition: vt_internal.h:44
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:341
uint8_t sps[3]
Definition: vt_internal.h:43
enum AVCodecID codec_id
Definition: avcodec.h:1575
int max_sub_layers
Definition: hevc_ps.h:242
#define bytestream2_put_ne24
Definition: bytestream.h:124
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
main external API structure.
Definition: avcodec.h:1565
uint8_t * data
The data buffer.
Definition: buffer.h:89
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:389
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session...
Definition: videotoolbox.h:70
void * buf
Definition: avisynth_c.h:766
size_t crop_right
Definition: frame.h:629
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:84
int extradata_size
Definition: avcodec.h:1667
int coded_height
Definition: avcodec.h:1753
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:366
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
struct AVFrame * f
Definition: mpegpicture.h:46
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
H264Picture * cur_pic_ptr
Definition: h264dec.h:346
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
const SPS * sps
Definition: h264_ps.h:146
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:64
const AVHWAccel ff_mpeg1_videotoolbox_hwaccel
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
int ff_videotoolbox_uninit(AVCodecContext *avctx)
Definition: videotoolbox.c:401
const AVHWAccel ff_mpeg4_videotoolbox_hwaccel
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:343
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:140
uint8_t level_idc
Definition: hevc_ps.h:180
MpegEncContext.
Definition: mpegvideo.h:81
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:52
A reference to a data buffer.
Definition: buffer.h:81
uint8_t profile_space
Definition: hevc_ps.h:176
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
VTDecompressionOutputCallback output_callback
The output callback that must be passed to the session.
Definition: videotoolbox.h:57
uint8_t data[4096]
Definition: h264_ps.h:102
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
size_t data_size
Definition: h264_ps.h:130
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:243
H264ParamSets ps
Definition: h264dec.h:456
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:190
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:40
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:48
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:106
void * priv_data
Definition: avcodec.h:1592
#define av_free(p)
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
const AVHWAccel ff_h263_videotoolbox_hwaccel
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:36
#define bytestream2_put_ne32
Definition: bytestream.h:125
int height
Definition: frame.h:326
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:55
#define av_freep(p)
uint8_t progressive_source_flag
Definition: hevc_ps.h:181
void av_videotoolbox_default_free(AVCodecContext *avctx)
This function must be called to free the Videotoolbox context initialized with av_videotoolbox_defaul...
#define bytestream2_put_ne16
Definition: bytestream.h:123
int bit_depth
Definition: hevc_ps.h:234
#define APPEND_PS(T, t)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3310
uint8_t non_packed_constraint_flag
Definition: hevc_ps.h:183
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:221
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
uint8_t interlaced_source_flag
Definition: hevc_ps.h:182
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:46
static av_always_inline int bytestream2_size_p(PutByteContext *p)
Definition: bytestream.h:203
GLuint buffer
Definition: opengl_enc.c:101
CVImageBufferRef frame
Definition: vt_internal.h:33
Public libavcodec Videotoolbox header.
#define V
Definition: avdct.c:30
static uint8_t tmp[11]
Definition: aes_ctr.c:26