FFmpeg
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
26 #include "libavutil/hwcontext_videotoolbox.h"
27 #include "vt_internal.h"
28 #include "libavutil/avutil.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/pixdesc.h"
31 #include "bytestream.h"
32 #include "decode.h"
33 #include "internal.h"
34 #include "h264dec.h"
35 #include "hevcdec.h"
36 #include "hwaccel_internal.h"
37 #include "mpegvideo.h"
38 #include "proresdec.h"
39 #include <Availability.h>
40 #include <AvailabilityMacros.h>
41 #include <TargetConditionals.h>
42 
43 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
44 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
45 #endif
46 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
47 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
48 #endif
49 
50 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
51 enum { kCMVideoCodecType_HEVC = 'hvc1' };
52 #endif
53 
54 #if !HAVE_KCMVIDEOCODECTYPE_VP9
55 enum { kCMVideoCodecType_VP9 = 'vp09' };
56 #endif
57 
58 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
59 
60 typedef struct VTHWFrame {
61  CVPixelBufferRef pixbuf;
62  AVBufferRef *hw_frames_ctx;
63 } VTHWFrame;
64 
65 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
66 {
67  VTHWFrame *ref = (VTHWFrame *)data;
68  av_buffer_unref(&ref->hw_frames_ctx);
69  CVPixelBufferRelease(ref->pixbuf);
70 
71  av_free(data);
72 }
73 
74 int ff_videotoolbox_buffer_copy(VTContext *vtctx,
75  const uint8_t *buffer,
76  uint32_t size)
77 {
78  void *tmp;
79 
80  tmp = av_fast_realloc(vtctx->bitstream,
81  &vtctx->allocated_size,
82  size);
83 
84  if (!tmp)
85  return AVERROR(ENOMEM);
86 
87  vtctx->bitstream = tmp;
88  memcpy(vtctx->bitstream, buffer, size);
89  vtctx->bitstream_size = size;
90 
91  return 0;
92 }
93 
94 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
95 {
96  int ret;
97  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
98 
99  if (!ref->pixbuf) {
100  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
101  av_frame_unref(frame);
102  return AVERROR_EXTERNAL;
103  }
104 
105  frame->crop_right = 0;
106  frame->crop_left = 0;
107  frame->crop_top = 0;
108  frame->crop_bottom = 0;
109 
110  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
111  return ret;
112 
113  frame->data[3] = (uint8_t*)ref->pixbuf;
114 
115  if (ref->hw_frames_ctx) {
116  av_buffer_unref(&frame->hw_frames_ctx);
117  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
118  if (!frame->hw_frames_ctx)
119  return AVERROR(ENOMEM);
120  }
121 
122  return 0;
123 }
124 
125 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
126 {
127  size_t size = sizeof(VTHWFrame);
128  uint8_t *data = NULL;
129  AVBufferRef *buf = NULL;
130  int ret = ff_attach_decode_data(frame);
131  FrameDecodeData *fdd;
132  if (ret < 0)
133  return ret;
134 
135  data = av_mallocz(size);
136  if (!data)
137  return AVERROR(ENOMEM);
138  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
139  if (!buf) {
140  av_freep(&data);
141  return AVERROR(ENOMEM);
142  }
143  frame->buf[0] = buf;
144 
145  fdd = (FrameDecodeData*)frame->private_ref->data;
146  fdd->post_process = videotoolbox_postproc_frame;
147 
148  frame->width = avctx->width;
149  frame->height = avctx->height;
150  frame->format = avctx->pix_fmt;
151 
152  return 0;
153 }
154 
155 #define AV_W8(p, v) *(p) = (v)
156 
157 static int escape_ps(uint8_t* dst, const uint8_t* src, int src_size)
158 {
159  int i;
160  int size = src_size;
161  uint8_t* p = dst;
162 
163  for (i = 0; i < src_size; i++) {
164  if (i + 2 < src_size &&
165  src[i] == 0x00 &&
166  src[i + 1] == 0x00 &&
167  src[i + 2] <= 0x03) {
168  if (dst) {
169  *p++ = src[i++];
170  *p++ = src[i];
171  *p++ = 0x03;
172  } else {
173  i++;
174  }
175  size++;
176  } else if (dst)
177  *p++ = src[i];
178  }
179 
180  if (dst)
181  av_assert0((p - dst) == size);
182 
183  return size;
184 }
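A short note on the two-pass pattern above: the stored SPS/PPS have had their emulation prevention bytes stripped, so before they are embedded verbatim in an avcC/hvcC box the 0x03 escape byte has to be re-inserted wherever two zero bytes are followed by a byte <= 0x03. The first call passes dst == NULL just to learn the escaped size; the second call writes the bytes. A small worked example (byte values invented for illustration):

 /* escape_ps(NULL, src, 5) -> 6   (size query only)
  * escape_ps(dst,  src, 5) -> 6   and writes the escaped bytes:
  *   src: 42 00 00 01 8d           (00 00 01 would mimic a start code)
  *   dst: 42 00 00 03 01 8d        (emulation prevention byte restored)
  */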
185 
186 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
187 {
188  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
189  H264Context *h = avctx->priv_data;
190  CFDataRef data = NULL;
191  uint8_t *p;
192  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
193  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
194  int vt_extradata_size;
195  uint8_t *vt_extradata;
196 
197  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
198  vt_extradata = av_malloc(vt_extradata_size);
199 
200  if (!vt_extradata)
201  return NULL;
202 
203  p = vt_extradata;
204 
205  AV_W8(p + 0, 1); /* version */
206  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
207  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
208  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
209  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
210  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
211  AV_WB16(p + 6, sps_size);
212  p += 8;
213  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
214  AV_W8(p + 0, 1); /* number of pps */
215  AV_WB16(p + 1, pps_size);
216  p += 3;
217  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
218 
219  av_assert0(p - vt_extradata == vt_extradata_size);
220 
221  // save sps header (profile/level) used to create decoder session,
222  // so we can detect changes and recreate it.
223  if (vtctx)
224  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
225 
226  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
227  av_free(vt_extradata);
228  return data;
229 }
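For reference, the AVCDecoderConfigurationRecord assembled above always carries exactly one SPS and one PPS; its byte layout, summarised from the code (illustrative rather than normative), is:

 /* avcC blob written above:
  *  [0]    configurationVersion = 1
  *  [1..3] profile, profile compatibility, level (copied from sps->data[1..3])
  *  [4]    0xff: 6 reserved bits + lengthSizeMinusOne = 3 (4-byte NAL sizes)
  *  [5]    0xe1: 3 reserved bits + numOfSequenceParameterSets = 1
  *  [6..7] SPS length (big endian), followed by the escaped SPS
  *  then   numOfPictureParameterSets = 1
  *  then   PPS length (big endian), followed by the escaped PPS
  */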
230 
231 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
232 {
233  HEVCContext *h = avctx->priv_data;
234  int i, num_vps = 0, num_sps = 0, num_pps = 0;
235  const HEVCVPS *vps = h->ps.vps;
236  const HEVCSPS *sps = h->ps.sps;
237  const HEVCPPS *pps = h->ps.pps;
238  PTLCommon ptlc = vps->ptl.general_ptl;
239  VUI vui = sps->vui;
240  uint8_t parallelismType;
241  CFDataRef data = NULL;
242  uint8_t *p;
243  int vt_extradata_size = 23 + 3 + 3 + 3;
244  uint8_t *vt_extradata;
245 
246 #define COUNT_SIZE_PS(T, t) \
247  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
248  if (h->ps.t##ps_list[i]) { \
249  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
250  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
251  num_##t##ps++; \
252  } \
253  }
254 
255  COUNT_SIZE_PS(V, v)
256  COUNT_SIZE_PS(S, s)
257  COUNT_SIZE_PS(P, p)
258 
259  vt_extradata = av_malloc(vt_extradata_size);
260  if (!vt_extradata)
261  return NULL;
262  p = vt_extradata;
263 
264  /* unsigned int(8) configurationVersion = 1; */
265  AV_W8(p + 0, 1);
266 
267  /*
268  * unsigned int(2) general_profile_space;
269  * unsigned int(1) general_tier_flag;
270  * unsigned int(5) general_profile_idc;
271  */
272  AV_W8(p + 1, ptlc.profile_space << 6 |
273  ptlc.tier_flag << 5 |
274  ptlc.profile_idc);
275 
276  /* unsigned int(32) general_profile_compatibility_flags; */
277  for (i = 0; i < 4; i++) {
278  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
279  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
280  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
281  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
282  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
283  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
284  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
285  ptlc.profile_compatibility_flag[i * 8 + 7]);
286  }
287 
288  /* unsigned int(48) general_constraint_indicator_flags; */
289  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
290  ptlc.interlaced_source_flag << 6 |
291  ptlc.non_packed_constraint_flag << 5 |
292  ptlc.frame_only_constraint_flag << 4);
293  AV_W8(p + 7, 0);
294  AV_WN32(p + 8, 0);
295 
296  /* unsigned int(8) general_level_idc; */
297  AV_W8(p + 12, ptlc.level_idc);
298 
299  /*
300  * bit(4) reserved = ‘1111’b;
301  * unsigned int(12) min_spatial_segmentation_idc;
302  */
303  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
304  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
305 
306  /*
307  * bit(6) reserved = ‘111111’b;
308  * unsigned int(2) parallelismType;
309  */
310  if (!pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
311  parallelismType = 0;
312  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
313  parallelismType = 0;
314  else if (pps->entropy_coding_sync_enabled_flag)
315  parallelismType = 3;
316  else if (pps->tiles_enabled_flag)
317  parallelismType = 2;
318  else
319  parallelismType = 1;
320  AV_W8(p + 15, 0xfc | parallelismType);
321 
322  /*
323  * bit(6) reserved = ‘111111’b;
324  * unsigned int(2) chromaFormat;
325  */
326  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
327 
328  /*
329  * bit(5) reserved = ‘11111’b;
330  * unsigned int(3) bitDepthLumaMinus8;
331  */
332  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
333 
334  /*
335  * bit(5) reserved = ‘11111’b;
336  * unsigned int(3) bitDepthChromaMinus8;
337  */
338  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
339 
340  /* bit(16) avgFrameRate; */
341  AV_WB16(p + 19, 0);
342 
343  /*
344  * bit(2) constantFrameRate;
345  * bit(3) numTemporalLayers;
346  * bit(1) temporalIdNested;
347  * unsigned int(2) lengthSizeMinusOne;
348  */
349  AV_W8(p + 21, 0 << 6 |
350  sps->max_sub_layers << 3 |
351  sps->temporal_id_nesting_flag << 2 |
352  3);
353 
354  /* unsigned int(8) numOfArrays; */
355  AV_W8(p + 22, 3);
356 
357  p += 23;
358 
359 #define APPEND_PS(T, t) \
360  /* \
361  * bit(1) array_completeness; \
362  * unsigned int(1) reserved = 0; \
363  * unsigned int(6) NAL_unit_type; \
364  */ \
365  AV_W8(p, 1 << 7 | \
366  HEVC_NAL_##T##PS & 0x3f); \
367  /* unsigned int(16) numNalus; */ \
368  AV_WB16(p + 1, num_##t##ps); \
369  p += 3; \
370  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
371  if (h->ps.t##ps_list[i]) { \
372  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
373  int size = escape_ps(p + 2, lps->data, lps->data_size); \
374  /* unsigned int(16) nalUnitLength; */ \
375  AV_WB16(p, size); \
376  /* bit(8*nalUnitLength) nalUnit; */ \
377  p += 2 + size; \
378  } \
379  }
380 
381  APPEND_PS(V, v)
382  APPEND_PS(S, s)
383  APPEND_PS(P, p)
384 
385  av_assert0(p - vt_extradata == vt_extradata_size);
386 
387  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
388  av_free(vt_extradata);
389  return data;
390 }
391 
392 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
393  const uint8_t *buffer,
394  uint32_t size)
395 {
396  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
397  H264Context *h = avctx->priv_data;
398 
399  if (h->is_avc == 1) {
400  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
401  }
402 
403  return 0;
404 }
405 
406 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
407  int type,
408  const uint8_t *buffer,
409  uint32_t size)
410 {
411  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
412  H264Context *h = avctx->priv_data;
413 
414  // save sps header (profile/level) used to create decoder session
415  if (!vtctx->sps[0])
416  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
417 
418  if (type == H264_NAL_SPS) {
419  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
420  vtctx->reconfig_needed = true;
421  memcpy(vtctx->sps, buffer + 1, 3);
422  }
423  }
424 
425  // pass-through SPS/PPS changes to the decoder
426  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
427 }
428 
429 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
430  const uint8_t *buffer,
431  uint32_t size)
432 {
433  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
434  void *tmp;
435 
436  tmp = av_fast_realloc(vtctx->bitstream,
437  &vtctx->allocated_size,
438  vtctx->bitstream_size+size+4);
439  if (!tmp)
440  return AVERROR(ENOMEM);
441 
442  vtctx->bitstream = tmp;
443 
444  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
445  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
446 
447  vtctx->bitstream_size += size + 4;
448 
449  return 0;
450 }
451 
452 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
453  const uint8_t *buffer,
454  uint32_t size)
455 {
456  H264Context *h = avctx->priv_data;
457 
458  if (h->is_avc == 1)
459  return 0;
460 
461  return videotoolbox_common_decode_slice(avctx, buffer, size);
462 }
463 
464 #if CONFIG_VIDEOTOOLBOX
465 // Return the AVVideotoolboxContext that matters currently. Where it comes from
466 // depends on the API used.
467 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
468 {
469  // Somewhat tricky because the user can call av_videotoolbox_default_free()
470  // at any time, even when the codec is closed.
471  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
472  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
473  if (vtctx->vt_ctx)
474  return vtctx->vt_ctx;
475  }
476  return avctx->hwaccel_context;
477 }
478 
479 static void videotoolbox_stop(AVCodecContext *avctx)
480 {
481  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
482  if (!videotoolbox)
483  return;
484 
485  if (videotoolbox->cm_fmt_desc) {
486  CFRelease(videotoolbox->cm_fmt_desc);
487  videotoolbox->cm_fmt_desc = NULL;
488  }
489 
490  if (videotoolbox->session) {
491  VTDecompressionSessionInvalidate(videotoolbox->session);
492  CFRelease(videotoolbox->session);
493  videotoolbox->session = NULL;
494  }
495 }
496 
497 int ff_videotoolbox_uninit(AVCodecContext *avctx)
498 {
499  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
500  if (!vtctx)
501  return 0;
502 
503  av_freep(&vtctx->bitstream);
504  if (vtctx->frame)
505  CVPixelBufferRelease(vtctx->frame);
506 
507  if (vtctx->vt_ctx)
508  videotoolbox_stop(avctx);
509 
510  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
511  av_freep(&vtctx->vt_ctx);
512 
513  return 0;
514 }
515 
516 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
517 {
518  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
519  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
520  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
521  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
522  int width = CVPixelBufferGetWidth(pixbuf);
523  int height = CVPixelBufferGetHeight(pixbuf);
524  AVHWFramesContext *cached_frames;
525  VTHWFrame *ref;
526  int ret;
527 
528  if (!frame->buf[0] || frame->data[3]) {
529  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
530  av_frame_unref(frame);
531  return AVERROR_EXTERNAL;
532  }
533 
534  ref = (VTHWFrame *)frame->buf[0]->data;
535 
536  if (ref->pixbuf)
537  CVPixelBufferRelease(ref->pixbuf);
538  ref->pixbuf = vtctx->frame;
539  vtctx->frame = NULL;
540 
541  // Old API code path.
542  if (!vtctx->cached_hw_frames_ctx)
543  return 0;
544 
545  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
546 
547  if (cached_frames->sw_format != sw_format ||
548  cached_frames->width != width ||
549  cached_frames->height != height) {
550  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
551  AVHWFramesContext *hw_frames;
552  if (!hw_frames_ctx)
553  return AVERROR(ENOMEM);
554 
555  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
556  hw_frames->format = cached_frames->format;
557  hw_frames->sw_format = sw_format;
558  hw_frames->width = width;
559  hw_frames->height = height;
560 
561  ret = av_hwframe_ctx_init(hw_frames_ctx);
562  if (ret < 0) {
563  av_buffer_unref(&hw_frames_ctx);
564  return ret;
565  }
566 
567  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
568  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
569  }
570 
571  av_buffer_unref(&ref->hw_frames_ctx);
572  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
573  if (!ref->hw_frames_ctx)
574  return AVERROR(ENOMEM);
575 
576  return 0;
577 }
578 
579 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
580 {
581  int i;
582  uint8_t b;
583 
584  for (i = 3; i >= 0; i--) {
585  b = (length >> (i * 7)) & 0x7F;
586  if (i != 0)
587  b |= 0x80;
588 
589  bytestream2_put_byteu(pb, b);
590  }
591 }
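As a worked example of the helper above (value chosen for illustration): the length is always emitted as four bytes of seven payload bits each, most significant group first, with the continuation bit set on every byte except the last.

 /* videotoolbox_write_mp4_descr_length(&pb, 300):
  *   300 = 0b0000000 0000000 0000010 0101100   (7-bit groups, MSB first)
  *   emitted bytes: 0x80 0x80 0x82 0x2C
  */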
592 
593 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
594 {
595  CFDataRef data;
596  uint8_t *rw_extradata;
597  PutByteContext pb;
598  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
599  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
600  int config_size = 13 + 5 + avctx->extradata_size;
601  int s;
602 
603  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
604  return NULL;
605 
606  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
607  bytestream2_put_byteu(&pb, 0); // version
608  bytestream2_put_ne24(&pb, 0); // flags
609 
610  // elementary stream descriptor
611  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
612  videotoolbox_write_mp4_descr_length(&pb, full_size);
613  bytestream2_put_ne16(&pb, 0); // esid
614  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
615 
616  // decoder configuration descriptor
617  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
618  videotoolbox_write_mp4_descr_length(&pb, config_size);
619  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
620  bytestream2_put_byteu(&pb, 0x11); // stream type
621  bytestream2_put_ne24(&pb, 0); // buffer size
622  bytestream2_put_ne32(&pb, 0); // max bitrate
623  bytestream2_put_ne32(&pb, 0); // avg bitrate
624 
625  // decoder specific descriptor
626  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
627  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
628 
629  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
630 
631  // SLConfigDescriptor
632  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
633  bytestream2_put_byteu(&pb, 0x01); // length
634  bytestream2_put_byteu(&pb, 0x02); // predefined SL config (0x02 = reserved for MP4 files)

635 
636  s = bytestream2_size_p(&pb);
637 
638  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
639 
640  av_freep(&rw_extradata);
641  return data;
642 }
643 
644 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
645  void *buffer,
646  int size)
647 {
648  OSStatus status;
649  CMBlockBufferRef block_buf;
650  CMSampleBufferRef sample_buf;
651 
652  block_buf = NULL;
653  sample_buf = NULL;
654 
655  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
656  buffer, // memoryBlock
657  size, // blockLength
658  kCFAllocatorNull, // blockAllocator
659  NULL, // customBlockSource
660  0, // offsetToData
661  size, // dataLength
662  0, // flags
663  &block_buf);
664 
665  if (!status) {
666  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
667  block_buf, // dataBuffer
668  TRUE, // dataReady
669  0, // makeDataReadyCallback
670  0, // makeDataReadyRefcon
671  fmt_desc, // formatDescription
672  1, // numSamples
673  0, // numSampleTimingEntries
674  NULL, // sampleTimingArray
675  0, // numSampleSizeEntries
676  NULL, // sampleSizeArray
677  &sample_buf);
678  }
679 
680  if (block_buf)
681  CFRelease(block_buf);
682 
683  return sample_buf;
684 }
685 
686 static void videotoolbox_decoder_callback(void *opaque,
687  void *sourceFrameRefCon,
688  OSStatus status,
689  VTDecodeInfoFlags flags,
690  CVImageBufferRef image_buffer,
691  CMTime pts,
692  CMTime duration)
693 {
694  VTContext *vtctx = opaque;
695 
696  if (vtctx->frame) {
697  CVPixelBufferRelease(vtctx->frame);
698  vtctx->frame = NULL;
699  }
700 
701  if (!image_buffer) {
702  av_log(vtctx->logctx, status ? AV_LOG_WARNING : AV_LOG_DEBUG,
703  "vt decoder cb: output image buffer is null: %i\n", status);
704  return;
705  }
706 
707  vtctx->frame = CVPixelBufferRetain(image_buffer);
708 }
709 
710 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
711 {
712  OSStatus status;
713  CMSampleBufferRef sample_buf;
714  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
715  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
716 
717  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
718  vtctx->bitstream,
719  vtctx->bitstream_size);
720 
721  if (!sample_buf)
722  return -1;
723 
724  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
725  sample_buf,
726  0, // decodeFlags
727  NULL, // sourceFrameRefCon
728  0); // infoFlagsOut
729  if (status == noErr)
730  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
731 
732  CFRelease(sample_buf);
733 
734  return status;
735 }
736 
737 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
738  CFDictionaryRef decoder_spec,
739  int width,
740  int height)
741 {
742  CMFormatDescriptionRef cm_fmt_desc;
743  OSStatus status;
744 
745  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
746  codec_type,
747  width,
748  height,
749  decoder_spec, // Dictionary of extension
750  &cm_fmt_desc);
751 
752  if (status)
753  return NULL;
754 
755  return cm_fmt_desc;
756 }
757 
758 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
759  int height,
760  OSType pix_fmt)
761 {
762  CFMutableDictionaryRef buffer_attributes;
763  CFMutableDictionaryRef io_surface_properties;
764  CFNumberRef cv_pix_fmt;
765  CFNumberRef w;
766  CFNumberRef h;
767 
768  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
769  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
770  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
771 
772  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
773  4,
774  &kCFTypeDictionaryKeyCallBacks,
775  &kCFTypeDictionaryValueCallBacks);
776  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
777  0,
778  &kCFTypeDictionaryKeyCallBacks,
779  &kCFTypeDictionaryValueCallBacks);
780 
781  if (pix_fmt)
782  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
783  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
784  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
785  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
786 #if TARGET_OS_IPHONE
787  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
788 #else
789  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
790 #endif
791 
792  CFRelease(io_surface_properties);
793  CFRelease(cv_pix_fmt);
794  CFRelease(w);
795  CFRelease(h);
796 
797  return buffer_attributes;
798 }
799 
800 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
801  AVCodecContext *avctx)
802 {
803  CFMutableDictionaryRef avc_info;
804  CFDataRef data = NULL;
805 
806  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
807  0,
808  &kCFTypeDictionaryKeyCallBacks,
809  &kCFTypeDictionaryValueCallBacks);
810 
811  CFDictionarySetValue(config_info,
812  codec_type == kCMVideoCodecType_HEVC ?
813  kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
814  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
815  kCFBooleanTrue);
816 
817  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
818  1,
819  &kCFTypeDictionaryKeyCallBacks,
820  &kCFTypeDictionaryValueCallBacks);
821 
822  switch (codec_type) {
823  case kCMVideoCodecType_MPEG4Video :
824  if (avctx->extradata_size)
825  data = videotoolbox_esds_extradata_create(avctx);
826  if (data)
827  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
828  break;
829  case kCMVideoCodecType_H264 :
830  data = ff_videotoolbox_avcc_extradata_create(avctx);
831  if (data)
832  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
833  break;
834  case kCMVideoCodecType_HEVC :
835  data = ff_videotoolbox_hvcc_extradata_create(avctx);
836  if (data)
837  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
838  break;
839 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
840  case kCMVideoCodecType_VP9 :
841  data = ff_videotoolbox_vpcc_extradata_create(avctx);
842  if (data)
843  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
844  break;
845 #endif
846  default:
847  break;
848  }
849 
850  CFDictionarySetValue(config_info,
851  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
852  avc_info);
853 
854  if (data)
855  CFRelease(data);
856 
857  CFRelease(avc_info);
858  return config_info;
859 }
860 
861 static int videotoolbox_start(AVCodecContext *avctx)
862 {
863  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
864  OSStatus status;
865  VTDecompressionOutputCallbackRecord decoder_cb;
866  CFDictionaryRef decoder_spec;
867  CFDictionaryRef buf_attr;
868 
869  if (!videotoolbox) {
870  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
871  return -1;
872  }
873 
874  switch( avctx->codec_id ) {
875  case AV_CODEC_ID_H263 :
876  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
877  break;
878  case AV_CODEC_ID_H264 :
879  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
880  break;
881  case AV_CODEC_ID_HEVC :
882  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
883  break;
884  case AV_CODEC_ID_MPEG1VIDEO :
885  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
886  break;
887  case AV_CODEC_ID_MPEG2VIDEO :
888  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
889  break;
890  case AV_CODEC_ID_MPEG4 :
891  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
892  break;
893  case AV_CODEC_ID_PRORES :
894  switch (avctx->codec_tag) {
895  default:
896  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
897  // fall-through
898  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
899  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
900  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
901  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
902  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
903  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
904  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
905  break;
906  }
907  break;
908  case AV_CODEC_ID_VP9 :
909  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
910  break;
911  default :
912  break;
913  }
914 
915 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
916  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
917  if (__builtin_available(macOS 10.9, *)) {
918  VTRegisterProfessionalVideoWorkflowVideoDecoders();
919  }
920  }
921 #endif
922 
923 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
924  if (__builtin_available(macOS 11.0, *)) {
925  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
926  }
927 #endif
928 
929  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
930 
931  if (!decoder_spec) {
932  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
933  return -1;
934  }
935 
936  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
937  decoder_spec,
938  avctx->width,
939  avctx->height);
940  if (!videotoolbox->cm_fmt_desc) {
941  if (decoder_spec)
942  CFRelease(decoder_spec);
943 
944  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
945  return -1;
946  }
947 
948  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
949  avctx->height,
950  videotoolbox->cv_pix_fmt_type);
951 
952  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
953  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
954 
955  status = VTDecompressionSessionCreate(NULL, // allocator
956  videotoolbox->cm_fmt_desc, // videoFormatDescription
957  decoder_spec, // videoDecoderSpecification
958  buf_attr, // destinationImageBufferAttributes
959  &decoder_cb, // outputCallback
960  &videotoolbox->session); // decompressionSessionOut
961 
962  if (decoder_spec)
963  CFRelease(decoder_spec);
964  if (buf_attr)
965  CFRelease(buf_attr);
966 
967  switch (status) {
968  case kVTVideoDecoderNotAvailableNowErr:
969  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
970  return AVERROR(ENOSYS);
971  case kVTVideoDecoderUnsupportedDataFormatErr:
972  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
973  return AVERROR(ENOSYS);
974  case kVTCouldNotFindVideoDecoderErr:
975  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
976  return AVERROR(ENOSYS);
977  case kVTVideoDecoderMalfunctionErr:
978  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
979  return AVERROR(EINVAL);
980  case kVTVideoDecoderBadDataErr:
981  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
982  return AVERROR_INVALIDDATA;
983  case 0:
984  return 0;
985  default:
986  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
987  return AVERROR_UNKNOWN;
988  }
989 }
990 
991 static const char *videotoolbox_error_string(OSStatus status)
992 {
993  switch (status) {
994  case kVTVideoDecoderBadDataErr:
995  return "bad data";
996  case kVTVideoDecoderMalfunctionErr:
997  return "decoder malfunction";
998  case kVTInvalidSessionErr:
999  return "invalid session";
1000  }
1001  return "unknown";
1002 }
1003 
1004 int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
1005 {
1006  OSStatus status;
1007  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1008  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1009 
1010  if (vtctx->reconfig_needed == true) {
1011  vtctx->reconfig_needed = false;
1012  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1013  videotoolbox_stop(avctx);
1014  if (videotoolbox_start(avctx) != 0) {
1015  return AVERROR_EXTERNAL;
1016  }
1017  }
1018 
1019  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1020  return AVERROR_INVALIDDATA;
1021 
1022  status = videotoolbox_session_decode_frame(avctx);
1023  if (status != noErr) {
1024  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1025  vtctx->reconfig_needed = true;
1026  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1027  return AVERROR_UNKNOWN;
1028  }
1029 
1030  if (!vtctx->frame) {
1031  vtctx->reconfig_needed = true;
1032  return AVERROR_UNKNOWN;
1033  }
1034 
1035  return videotoolbox_buffer_create(avctx, frame);
1036 }
1037 
1038 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1039 {
1040  H264Context *h = avctx->priv_data;
1041  AVFrame *frame = h->cur_pic_ptr->f;
1042  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1043  int ret = ff_videotoolbox_common_end_frame(avctx, frame);
1044  vtctx->bitstream_size = 0;
1045  return ret;
1046 }
1047 
1048 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1049  const uint8_t *buffer,
1050  uint32_t size)
1051 {
1052  return 0;
1053 }
1054 
1055 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1056  const uint8_t *buffer,
1057  uint32_t size)
1058 {
1059  return videotoolbox_common_decode_slice(avctx, buffer, size);
1060 }
1061 
1062 
1063 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1064  int type,
1065  const uint8_t *buffer,
1066  uint32_t size)
1067 {
1068  return videotoolbox_common_decode_slice(avctx, buffer, size);
1069 }
1070 
1071 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1072 {
1073  HEVCContext *h = avctx->priv_data;
1074  AVFrame *frame = h->ref->frame;
1075  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1076  int ret;
1077 
1078  h->output_frame->crop_right = 0;
1079  h->output_frame->crop_left = 0;
1080  h->output_frame->crop_top = 0;
1081  h->output_frame->crop_bottom = 0;
1082 
1083  ret = ff_videotoolbox_common_end_frame(avctx, frame);
1084  vtctx->bitstream_size = 0;
1085  return ret;
1086 }
1087 
1088 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1089  const uint8_t *buffer,
1090  uint32_t size)
1091 {
1092  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1093 
1094  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1095 }
1096 
1097 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
1098  const uint8_t *buffer,
1099  uint32_t size)
1100 {
1101  return 0;
1102 }
1103 
1104 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1105 {
1106  MpegEncContext *s = avctx->priv_data;
1107  AVFrame *frame = s->current_picture_ptr->f;
1108 
1109  return ff_videotoolbox_common_end_frame(avctx, frame);
1110 }
1111 
1112 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
1113  const uint8_t *buffer,
1114  uint32_t size)
1115 {
1116  return 0;
1117 }
1118 
1119 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1120  const uint8_t *buffer,
1121  uint32_t size)
1122 {
1123  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1124 
1125  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1126 }
1127 
1128 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1129 {
1130  ProresContext *ctx = avctx->priv_data;
1131  AVFrame *frame = ctx->frame;
1132 
1133  return ff_videotoolbox_common_end_frame(avctx, frame);
1134 }
1135 
1136 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1137  int depth;
1138  const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
1139  if (!descriptor)
1140  return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1141 
1142 
1143  if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
1144  return AV_PIX_FMT_AYUV64;
1145 
1146  depth = descriptor->comp[0].depth;
1147 
1148 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
1149  if (depth > 10)
1150  return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
1151 #endif
1152 
1153 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
1154  if (descriptor->log2_chroma_w == 0) {
1155 #if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
1156  if (depth <= 8)
1157  return AV_PIX_FMT_NV24;
1158 #endif
1159  return AV_PIX_FMT_P410;
1160  }
1161 #endif
1162 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
1163  if (descriptor->log2_chroma_h == 0) {
1164 #if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
1165  if (depth <= 8)
1166  return AV_PIX_FMT_NV16;
1167 #endif
1168  return AV_PIX_FMT_P210;
1169  }
1170 #endif
1171 #if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
1172  if (depth > 8) {
1173  return AV_PIX_FMT_P010;
1174  }
1175 #endif
1176 
1177  return AV_PIX_FMT_NV12;
1178 }
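A few concrete mappings that follow from the chain above, assuming the corresponding kCVPixelFormatType_* constants were available at build time (illustrative summary, not an exhaustive table):

 /* sw_pix_fmt              -> preferred VideoToolbox sw_format
  *  yuv420p                -> AV_PIX_FMT_NV12
  *  yuv420p10              -> AV_PIX_FMT_P010
  *  yuv422p10              -> AV_PIX_FMT_P210
  *  yuv444p10              -> AV_PIX_FMT_P410
  *  yuv422p12 / yuv444p12  -> AV_PIX_FMT_P216 / AV_PIX_FMT_P416
  *  anything with alpha    -> AV_PIX_FMT_AYUV64
  */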
1179 
1180 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1181  bool full_range)
1182 {
1183  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1184 
1185  if (ret) {
1186  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1187  if (cv_pix_fmt_type == 0) {
1188  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1189  }
1190  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1191  }
1192 
1193  return ret;
1194 }
1195 
1196 int ff_videotoolbox_common_init(AVCodecContext *avctx)
1197 {
1198  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1199  AVHWFramesContext *hw_frames;
1200  int err;
1201  bool full_range;
1202 
1203  vtctx->logctx = avctx;
1204 
1205  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1206  avctx->hwaccel_context)
1207  return videotoolbox_start(avctx);
1208 
1209  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1210  av_log(avctx, AV_LOG_ERROR,
1211  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1212  return AVERROR(EINVAL);
1213  }
1214 
1215  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1216  if (!vtctx->vt_ctx) {
1217  err = AVERROR(ENOMEM);
1218  goto fail;
1219  }
1220 
1221  if (avctx->hw_frames_ctx) {
1222  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1223  } else {
1224  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1225  if (!avctx->hw_frames_ctx) {
1226  err = AVERROR(ENOMEM);
1227  goto fail;
1228  }
1229 
1230  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1231  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1232  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1233  hw_frames->width = avctx->width;
1234  hw_frames->height = avctx->height;
1235 
1236  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1237  if (err < 0) {
1238  av_buffer_unref(&avctx->hw_frames_ctx);
1239  goto fail;
1240  }
1241  }
1242 
1243  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1244  if (!vtctx->cached_hw_frames_ctx) {
1245  err = AVERROR(ENOMEM);
1246  goto fail;
1247  }
1248 
1249  full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1250  vtctx->vt_ctx->cv_pix_fmt_type =
1251  av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1252  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1253  const AVPixFmtDescriptor *attempted_format =
1254  av_pix_fmt_desc_get(hw_frames->sw_format);
1255  av_log(avctx, AV_LOG_ERROR,
1256  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1257  "a VideoToolbox format!\n",
1258  attempted_format ? attempted_format->name : "<unknown>",
1259  av_color_range_name(avctx->color_range));
1260  err = AVERROR(EINVAL);
1261  goto fail;
1262  }
1263 
1264  err = videotoolbox_start(avctx);
1265  if (err < 0)
1266  goto fail;
1267 
1268  return 0;
1269 
1270 fail:
1271  ff_videotoolbox_uninit(avctx);
1272  return err;
1273 }
1274 
1275 int ff_videotoolbox_frame_params(AVCodecContext *avctx,
1276  AVBufferRef *hw_frames_ctx)
1277 {
1278  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1279 
1280  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1281  frames_ctx->width = avctx->coded_width;
1282  frames_ctx->height = avctx->coded_height;
1283  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1284 
1285  return 0;
1286 }
1287 
1288 const FFHWAccel ff_h263_videotoolbox_hwaccel = {
1289  .p.name = "h263_videotoolbox",
1290  .p.type = AVMEDIA_TYPE_VIDEO,
1291  .p.id = AV_CODEC_ID_H263,
1292  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1293  .alloc_frame = ff_videotoolbox_alloc_frame,
1294  .start_frame = videotoolbox_mpeg_start_frame,
1295  .decode_slice = videotoolbox_mpeg_decode_slice,
1296  .end_frame = videotoolbox_mpeg_end_frame,
1297  .frame_params = ff_videotoolbox_frame_params,
1298  .init = ff_videotoolbox_common_init,
1299  .uninit = ff_videotoolbox_uninit,
1300  .priv_data_size = sizeof(VTContext),
1301 };
1302 
1303 const FFHWAccel ff_hevc_videotoolbox_hwaccel = {
1304  .p.name = "hevc_videotoolbox",
1305  .p.type = AVMEDIA_TYPE_VIDEO,
1306  .p.id = AV_CODEC_ID_HEVC,
1307  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1308  .alloc_frame = ff_videotoolbox_alloc_frame,
1309  .start_frame = videotoolbox_hevc_start_frame,
1310  .decode_slice = videotoolbox_hevc_decode_slice,
1311  .decode_params = videotoolbox_hevc_decode_params,
1312  .end_frame = videotoolbox_hevc_end_frame,
1313  .frame_params = ff_videotoolbox_frame_params,
1314  .init = ff_videotoolbox_common_init,
1315  .uninit = ff_videotoolbox_uninit,
1316  .priv_data_size = sizeof(VTContext),
1317 };
1318 
1319 const FFHWAccel ff_h264_videotoolbox_hwaccel = {
1320  .p.name = "h264_videotoolbox",
1321  .p.type = AVMEDIA_TYPE_VIDEO,
1322  .p.id = AV_CODEC_ID_H264,
1323  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1324  .alloc_frame = ff_videotoolbox_alloc_frame,
1325  .start_frame = ff_videotoolbox_h264_start_frame,
1326  .decode_slice = ff_videotoolbox_h264_decode_slice,
1327  .decode_params = videotoolbox_h264_decode_params,
1328  .end_frame = videotoolbox_h264_end_frame,
1329  .frame_params = ff_videotoolbox_frame_params,
1330  .init = ff_videotoolbox_common_init,
1331  .uninit = ff_videotoolbox_uninit,
1332  .priv_data_size = sizeof(VTContext),
1333 };
1334 
1335 const FFHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1336  .p.name = "mpeg1_videotoolbox",
1337  .p.type = AVMEDIA_TYPE_VIDEO,
1338  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1339  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1340  .alloc_frame = ff_videotoolbox_alloc_frame,
1341  .start_frame = videotoolbox_mpeg_start_frame,
1342  .decode_slice = videotoolbox_mpeg_decode_slice,
1343  .end_frame = videotoolbox_mpeg_end_frame,
1344  .frame_params = ff_videotoolbox_frame_params,
1345  .init = ff_videotoolbox_common_init,
1346  .uninit = ff_videotoolbox_uninit,
1347  .priv_data_size = sizeof(VTContext),
1348 };
1349 
1350 const FFHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1351  .p.name = "mpeg2_videotoolbox",
1352  .p.type = AVMEDIA_TYPE_VIDEO,
1353  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1354  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1355  .alloc_frame = ff_videotoolbox_alloc_frame,
1356  .start_frame = videotoolbox_mpeg_start_frame,
1357  .decode_slice = videotoolbox_mpeg_decode_slice,
1358  .end_frame = videotoolbox_mpeg_end_frame,
1359  .frame_params = ff_videotoolbox_frame_params,
1360  .init = ff_videotoolbox_common_init,
1361  .uninit = ff_videotoolbox_uninit,
1362  .priv_data_size = sizeof(VTContext),
1363 };
1364 
1365 const FFHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1366  .p.name = "mpeg4_videotoolbox",
1367  .p.type = AVMEDIA_TYPE_VIDEO,
1368  .p.id = AV_CODEC_ID_MPEG4,
1369  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1370  .alloc_frame = ff_videotoolbox_alloc_frame,
1371  .start_frame = videotoolbox_mpeg_start_frame,
1372  .decode_slice = videotoolbox_mpeg_decode_slice,
1373  .end_frame = videotoolbox_mpeg_end_frame,
1374  .frame_params = ff_videotoolbox_frame_params,
1375  .init = ff_videotoolbox_common_init,
1376  .uninit = ff_videotoolbox_uninit,
1377  .priv_data_size = sizeof(VTContext),
1378 };
1379 
1380 const FFHWAccel ff_prores_videotoolbox_hwaccel = {
1381  .p.name = "prores_videotoolbox",
1382  .p.type = AVMEDIA_TYPE_VIDEO,
1383  .p.id = AV_CODEC_ID_PRORES,
1384  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1385  .alloc_frame = ff_videotoolbox_alloc_frame,
1386  .start_frame = videotoolbox_prores_start_frame,
1387  .decode_slice = videotoolbox_prores_decode_slice,
1388  .end_frame = videotoolbox_prores_end_frame,
1389  .frame_params = ff_videotoolbox_frame_params,
1390  .init = ff_videotoolbox_common_init,
1391  .uninit = ff_videotoolbox_uninit,
1392  .priv_data_size = sizeof(VTContext),
1393 };
1394 
1395 
1396 
1397 #if FF_API_VT_HWACCEL_CONTEXT
1398 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1399 {
1400  return videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1401 }
1402 
1403 int av_videotoolbox_default_init(AVCodecContext *avctx)
1404 {
1405  return av_videotoolbox_default_init2(avctx, NULL);
1406 }
1407 
1408 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1409 {
1410  enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1411  bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1412  avctx->hwaccel_context = vtctx ?: videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1413  if (!avctx->hwaccel_context)
1414  return AVERROR(ENOMEM);
1415  return 0;
1416 }
1417 
1418 void av_videotoolbox_default_free(AVCodecContext *avctx)
1419 {
1420 
1421  videotoolbox_stop(avctx);
1422  av_freep(&avctx->hwaccel_context);
1423 }
1424 #endif /* FF_API_VT_HWACCEL_CONTEXT */
1425 
1426 #endif /* CONFIG_VIDEOTOOLBOX */
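The hwaccels defined above are normally driven through the generic libavcodec hardware decoding API rather than the deprecated av_videotoolbox_default_* helpers. A minimal sketch of that calling pattern follows; it is illustrative only (names such as open_vt_decoder and get_vt_format are this example's own, and error handling is trimmed), but every libav* call used is standard public API:

 #include <libavcodec/avcodec.h>
 #include <libavutil/hwcontext.h>

 static enum AVPixelFormat get_vt_format(AVCodecContext *avctx,
                                         const enum AVPixelFormat *fmts)
 {
     /* prefer the VideoToolbox hwaccel format when the decoder offers it */
     for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
         if (*p == AV_PIX_FMT_VIDEOTOOLBOX)
             return *p;
     return fmts[0]; /* otherwise fall back to software decoding */
 }

 int open_vt_decoder(AVCodecContext **pctx, const AVCodecParameters *par)
 {
     const AVCodec *dec = avcodec_find_decoder(par->codec_id);
     AVCodecContext *ctx = avcodec_alloc_context3(dec);
     int ret;

     avcodec_parameters_to_context(ctx, par);

     /* create a VideoToolbox device; ff_videotoolbox_common_init() then takes
      * the hw_device_ctx branch and allocates its own frames context */
     ret = av_hwdevice_ctx_create(&ctx->hw_device_ctx,
                                  AV_HWDEVICE_TYPE_VIDEOTOOLBOX, NULL, NULL, 0);
     if (ret < 0)
         return ret;
     ctx->get_format = get_vt_format;

     ret = avcodec_open2(ctx, dec, NULL);
     if (ret < 0)
         return ret;
     *pctx = ctx;
     return 0;
 }

Decoded frames then come back with format AV_PIX_FMT_VIDEOTOOLBOX and the CVPixelBufferRef in frame->data[3], exactly as set up in videotoolbox_postproc_frame() above.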