FFmpeg
videotoolbox.c
Go to the documentation of this file.
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "config_components.h"
25 #include "videotoolbox.h"
27 #include "libavutil/mem.h"
28 #include "vt_internal.h"
29 #include "libavutil/avutil.h"
30 #include "libavutil/hwcontext.h"
31 #include "libavutil/pixdesc.h"
32 #include "bytestream.h"
33 #include "decode.h"
34 #include "internal.h"
35 #include "h264dec.h"
36 #include "hevcdec.h"
37 #include "hwaccel_internal.h"
38 #include "mpegvideo.h"
39 #include "proresdec.h"
40 #include <Availability.h>
41 #include <AvailabilityMacros.h>
42 #include <TargetConditionals.h>
43 
44 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
45 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
46 #endif
47 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
48 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
49 #endif
50 
51 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
52 enum { kCMVideoCodecType_HEVC = 'hvc1' };
53 #endif
54 
55 #if !HAVE_KCMVIDEOCODECTYPE_VP9
56 enum { kCMVideoCodecType_VP9 = 'vp09' };
57 #endif
58 
59 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
60 
61 typedef struct VTHWFrame {
62  CVPixelBufferRef pixbuf;
64 } VTHWFrame;
65 
66 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
67 {
69  av_buffer_unref(&ref->hw_frames_ctx);
70  CVPixelBufferRelease(ref->pixbuf);
71 
72  av_free(data);
73 }
74 
76  const uint8_t *buffer,
77  uint32_t size)
78 {
79  void *tmp;
80 
81  tmp = av_fast_realloc(vtctx->bitstream,
82  &vtctx->allocated_size,
83  size);
84 
85  if (!tmp)
86  return AVERROR(ENOMEM);
87 
88  vtctx->bitstream = tmp;
89  memcpy(vtctx->bitstream, buffer, size);
90  vtctx->bitstream_size = size;
91 
92  return 0;
93 }
94 
95 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
96 {
97  int ret;
98  VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
99 
100  if (!ref->pixbuf) {
101  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
103  return AVERROR_EXTERNAL;
104  }
105 
106  frame->crop_right = 0;
107  frame->crop_left = 0;
108  frame->crop_top = 0;
109  frame->crop_bottom = 0;
110 
111  if ((ret = av_vt_pixbuf_set_attachments(avctx, ref->pixbuf, frame)) < 0)
112  return ret;
113 
114  frame->data[3] = (uint8_t*)ref->pixbuf;
115 
116  if (ref->hw_frames_ctx) {
117  av_buffer_unref(&frame->hw_frames_ctx);
118  frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
119  if (!frame->hw_frames_ctx)
120  return AVERROR(ENOMEM);
121  }
122 
123  return 0;
124 }
125 
127 {
128  size_t size = sizeof(VTHWFrame);
129  uint8_t *data = NULL;
130  AVBufferRef *buf = NULL;
132  FrameDecodeData *fdd;
133  if (ret < 0)
134  return ret;
135 
136  data = av_mallocz(size);
137  if (!data)
138  return AVERROR(ENOMEM);
140  if (!buf) {
141  av_freep(&data);
142  return AVERROR(ENOMEM);
143  }
144  frame->buf[0] = buf;
145 
146  fdd = (FrameDecodeData*)frame->private_ref->data;
148 
149  frame->width = avctx->width;
150  frame->height = avctx->height;
151  frame->format = avctx->pix_fmt;
152 
153  return 0;
154 }
155 
156 #define AV_W8(p, v) *(p) = (v)
157 
/* Insert H.26x emulation-prevention bytes (0x03) after any 0x00 0x00 pair
 * followed by a byte <= 0x03. With dst == NULL only the escaped size is
 * computed. Returns the escaped size in bytes. */
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
{
    uint8_t *out = dst;
    int total = src_size;
    int pos;

    for (pos = 0; pos < src_size; pos++) {
        int needs_escape = pos + 2 < src_size  &&
                           src[pos]     == 0x00 &&
                           src[pos + 1] == 0x00 &&
                           src[pos + 2] <= 0x03;

        if (needs_escape) {
            // Emit the two zero bytes, then the 0x03 escape marker.
            if (out) {
                *out++ = src[pos++];
                *out++ = src[pos];
                *out++ = 0x03;
            } else {
                pos++; // counting mode: just skip past the second zero
            }
            total++;
        } else if (out) {
            *out++ = src[pos];
        }
    }

    if (dst)
        av_assert0((out - dst) == total);

    return total;
}
186 
188 {
189  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
190  H264Context *h = avctx->priv_data;
191  CFDataRef data = NULL;
192  uint8_t *p;
193  int sps_size = escape_ps(NULL, h->ps.sps->data, h->ps.sps->data_size);
194  int pps_size = escape_ps(NULL, h->ps.pps->data, h->ps.pps->data_size);
195  int vt_extradata_size;
196  uint8_t *vt_extradata;
197 
198  vt_extradata_size = 6 + 2 + sps_size + 3 + pps_size;
199  vt_extradata = av_malloc(vt_extradata_size);
200 
201  if (!vt_extradata)
202  return NULL;
203 
204  p = vt_extradata;
205 
206  AV_W8(p + 0, 1); /* version */
207  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
208  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
209  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
210  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
211  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
212  AV_WB16(p + 6, sps_size);
213  p += 8;
214  p += escape_ps(p, h->ps.sps->data, h->ps.sps->data_size);
215  AV_W8(p + 0, 1); /* number of pps */
216  AV_WB16(p + 1, pps_size);
217  p += 3;
218  p += escape_ps(p, h->ps.pps->data, h->ps.pps->data_size);
219 
220  av_assert0(p - vt_extradata == vt_extradata_size);
221 
222  // save sps header (profile/level) used to create decoder session,
223  // so we can detect changes and recreate it.
224  if (vtctx)
225  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
226 
227  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
228  av_free(vt_extradata);
229  return data;
230 }
231 
233 {
234  HEVCContext *h = avctx->priv_data;
235  int i, num_vps = 0, num_sps = 0, num_pps = 0;
236  const HEVCVPS *vps = h->ps.vps;
237  const HEVCSPS *sps = h->ps.sps;
238  const HEVCPPS *pps = h->ps.pps;
239  PTLCommon ptlc = vps->ptl.general_ptl;
240  VUI vui = sps->vui;
241  uint8_t parallelismType;
242  CFDataRef data = NULL;
243  uint8_t *p;
244  int vt_extradata_size = 23 + 3 + 3 + 3;
245  uint8_t *vt_extradata;
246 
247 #define COUNT_SIZE_PS(T, t) \
248  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
249  if (h->ps.t##ps_list[i]) { \
250  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
251  vt_extradata_size += 2 + escape_ps(NULL, lps->data, lps->data_size); \
252  num_##t##ps++; \
253  } \
254  }
255 
256  COUNT_SIZE_PS(V, v)
257  COUNT_SIZE_PS(S, s)
258  COUNT_SIZE_PS(P, p)
259 
260  vt_extradata = av_malloc(vt_extradata_size);
261  if (!vt_extradata)
262  return NULL;
263  p = vt_extradata;
264 
265  /* unsigned int(8) configurationVersion = 1; */
266  AV_W8(p + 0, 1);
267 
268  /*
269  * unsigned int(2) general_profile_space;
270  * unsigned int(1) general_tier_flag;
271  * unsigned int(5) general_profile_idc;
272  */
273  AV_W8(p + 1, ptlc.profile_space << 6 |
274  ptlc.tier_flag << 5 |
275  ptlc.profile_idc);
276 
277  /* unsigned int(32) general_profile_compatibility_flags; */
278  for (i = 0; i < 4; i++) {
279  AV_W8(p + 2 + i, ptlc.profile_compatibility_flag[i * 8] << 7 |
280  ptlc.profile_compatibility_flag[i * 8 + 1] << 6 |
281  ptlc.profile_compatibility_flag[i * 8 + 2] << 5 |
282  ptlc.profile_compatibility_flag[i * 8 + 3] << 4 |
283  ptlc.profile_compatibility_flag[i * 8 + 4] << 3 |
284  ptlc.profile_compatibility_flag[i * 8 + 5] << 2 |
285  ptlc.profile_compatibility_flag[i * 8 + 6] << 1 |
286  ptlc.profile_compatibility_flag[i * 8 + 7]);
287  }
288 
289  /* unsigned int(48) general_constraint_indicator_flags; */
290  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
291  ptlc.interlaced_source_flag << 6 |
292  ptlc.non_packed_constraint_flag << 5 |
293  ptlc.frame_only_constraint_flag << 4);
294  AV_W8(p + 7, 0);
295  AV_WN32(p + 8, 0);
296 
297  /* unsigned int(8) general_level_idc; */
298  AV_W8(p + 12, ptlc.level_idc);
299 
300  /*
301  * bit(4) reserved = ‘1111’b;
302  * unsigned int(12) min_spatial_segmentation_idc;
303  */
304  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
305  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
306 
307  /*
308  * bit(6) reserved = ‘111111’b;
309  * unsigned int(2) parallelismType;
310  */
312  parallelismType = 0;
313  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
314  parallelismType = 0;
315  else if (pps->entropy_coding_sync_enabled_flag)
316  parallelismType = 3;
317  else if (pps->tiles_enabled_flag)
318  parallelismType = 2;
319  else
320  parallelismType = 1;
321  AV_W8(p + 15, 0xfc | parallelismType);
322 
323  /*
324  * bit(6) reserved = ‘111111’b;
325  * unsigned int(2) chromaFormat;
326  */
327  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
328 
329  /*
330  * bit(5) reserved = ‘11111’b;
331  * unsigned int(3) bitDepthLumaMinus8;
332  */
333  AV_W8(p + 17, (sps->bit_depth - 8) | 0xf8);
334 
335  /*
336  * bit(5) reserved = ‘11111’b;
337  * unsigned int(3) bitDepthChromaMinus8;
338  */
339  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xf8);
340 
341  /* bit(16) avgFrameRate; */
342  AV_WB16(p + 19, 0);
343 
344  /*
345  * bit(2) constantFrameRate;
346  * bit(3) numTemporalLayers;
347  * bit(1) temporalIdNested;
348  * unsigned int(2) lengthSizeMinusOne;
349  */
350  AV_W8(p + 21, 0 << 6 |
351  sps->max_sub_layers << 3 |
352  sps->temporal_id_nesting_flag << 2 |
353  3);
354 
355  /* unsigned int(8) numOfArrays; */
356  AV_W8(p + 22, 3);
357 
358  p += 23;
359 
360 #define APPEND_PS(T, t) \
361  /* \
362  * bit(1) array_completeness; \
363  * unsigned int(1) reserved = 0; \
364  * unsigned int(6) NAL_unit_type; \
365  */ \
366  AV_W8(p, 1 << 7 | \
367  HEVC_NAL_##T##PS & 0x3f); \
368  /* unsigned int(16) numNalus; */ \
369  AV_WB16(p + 1, num_##t##ps); \
370  p += 3; \
371  for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
372  if (h->ps.t##ps_list[i]) { \
373  const HEVC##T##PS *lps = h->ps.t##ps_list[i]; \
374  int size = escape_ps(p + 2, lps->data, lps->data_size); \
375  /* unsigned int(16) nalUnitLength; */ \
376  AV_WB16(p, size); \
377  /* bit(8*nalUnitLength) nalUnit; */ \
378  p += 2 + size; \
379  } \
380  }
381 
382  APPEND_PS(V, v)
383  APPEND_PS(S, s)
384  APPEND_PS(P, p)
385 
386  av_assert0(p - vt_extradata == vt_extradata_size);
387 
388  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
389  av_free(vt_extradata);
390  return data;
391 }
392 
394  const uint8_t *buffer,
395  uint32_t size)
396 {
397  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
398  H264Context *h = avctx->priv_data;
399 
400  if (h->is_avc == 1) {
401  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
402  }
403 
404  return 0;
405 }
406 
408  int type,
409  const uint8_t *buffer,
410  uint32_t size)
411 {
412  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
413  H264Context *h = avctx->priv_data;
414 
415  // save sps header (profile/level) used to create decoder session
416  if (!vtctx->sps[0])
417  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
418 
419  if (type == H264_NAL_SPS) {
420  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
421  vtctx->reconfig_needed = true;
422  memcpy(vtctx->sps, buffer + 1, 3);
423  }
424  }
425 
426  // pass-through SPS/PPS changes to the decoder
428 }
429 
431  const uint8_t *buffer,
432  uint32_t size)
433 {
434  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
435  void *tmp;
436 
437  tmp = av_fast_realloc(vtctx->bitstream,
438  &vtctx->allocated_size,
439  vtctx->bitstream_size+size+4);
440  if (!tmp)
441  return AVERROR(ENOMEM);
442 
443  vtctx->bitstream = tmp;
444 
445  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
446  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
447 
448  vtctx->bitstream_size += size + 4;
449 
450  return 0;
451 }
452 
454  const uint8_t *buffer,
455  uint32_t size)
456 {
457  H264Context *h = avctx->priv_data;
458 
459  if (h->is_avc == 1)
460  return 0;
461 
463 }
464 
465 #if CONFIG_VIDEOTOOLBOX
466 // Return the AVVideotoolboxContext that matters currently. Where it comes from
467 // depends on the API used.
468 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
469 {
470  // Somewhat tricky because the user can call av_videotoolbox_default_free()
471  // at any time, even when the codec is closed.
472  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
473  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
474  if (vtctx->vt_ctx)
475  return vtctx->vt_ctx;
476  }
477  return avctx->hwaccel_context;
478 }
479 
480 static void videotoolbox_stop(AVCodecContext *avctx)
481 {
482  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
483  if (!videotoolbox)
484  return;
485 
486  if (videotoolbox->cm_fmt_desc) {
487  CFRelease(videotoolbox->cm_fmt_desc);
488  videotoolbox->cm_fmt_desc = NULL;
489  }
490 
491  if (videotoolbox->session) {
492  VTDecompressionSessionInvalidate(videotoolbox->session);
493  CFRelease(videotoolbox->session);
494  videotoolbox->session = NULL;
495  }
496 }
497 
499 {
500  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
501  if (!vtctx)
502  return 0;
503 
504  av_freep(&vtctx->bitstream);
505  if (vtctx->frame)
506  CVPixelBufferRelease(vtctx->frame);
507 
508  if (vtctx->vt_ctx)
509  videotoolbox_stop(avctx);
510 
512  av_freep(&vtctx->vt_ctx);
513 
514  return 0;
515 }
516 
517 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
518 {
519  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
520  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
521  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
522  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
523  int width = CVPixelBufferGetWidth(pixbuf);
524  int height = CVPixelBufferGetHeight(pixbuf);
525  AVHWFramesContext *cached_frames;
526  VTHWFrame *ref;
527  int ret;
528 
529  if (!frame->buf[0] || frame->data[3]) {
530  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
532  return AVERROR_EXTERNAL;
533  }
534 
535  ref = (VTHWFrame *)frame->buf[0]->data;
536 
537  if (ref->pixbuf)
538  CVPixelBufferRelease(ref->pixbuf);
539  ref->pixbuf = vtctx->frame;
540  vtctx->frame = NULL;
541 
542  // Old API code path.
543  if (!vtctx->cached_hw_frames_ctx)
544  return 0;
545 
546  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
547 
548  if (cached_frames->sw_format != sw_format ||
549  cached_frames->width != width ||
550  cached_frames->height != height) {
551  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
552  AVHWFramesContext *hw_frames;
553  AVVTFramesContext *hw_ctx;
554  if (!hw_frames_ctx)
555  return AVERROR(ENOMEM);
556 
557  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
558  hw_frames->format = cached_frames->format;
559  hw_frames->sw_format = sw_format;
560  hw_frames->width = width;
561  hw_frames->height = height;
562  hw_ctx = hw_frames->hwctx;
563  hw_ctx->color_range = avctx->color_range;
564 
565  ret = av_hwframe_ctx_init(hw_frames_ctx);
566  if (ret < 0) {
567  av_buffer_unref(&hw_frames_ctx);
568  return ret;
569  }
570 
572  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
573  }
574 
575  av_buffer_unref(&ref->hw_frames_ctx);
576  ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
577  if (!ref->hw_frames_ctx)
578  return AVERROR(ENOMEM);
579 
580  return 0;
581 }
582 
583 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
584 {
585  int i;
586  uint8_t b;
587 
588  for (i = 3; i >= 0; i--) {
589  b = (length >> (i * 7)) & 0x7F;
590  if (i != 0)
591  b |= 0x80;
592 
593  bytestream2_put_byteu(pb, b);
594  }
595 }
596 
597 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
598 {
599  CFDataRef data;
600  uint8_t *rw_extradata;
601  PutByteContext pb;
602  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
603  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
604  int config_size = 13 + 5 + avctx->extradata_size;
605  int s;
606 
607  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
608  return NULL;
609 
610  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
611  bytestream2_put_byteu(&pb, 0); // version
612  bytestream2_put_ne24(&pb, 0); // flags
613 
614  // elementary stream descriptor
615  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
616  videotoolbox_write_mp4_descr_length(&pb, full_size);
617  bytestream2_put_ne16(&pb, 0); // esid
618  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
619 
620  // decoder configuration descriptor
621  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
622  videotoolbox_write_mp4_descr_length(&pb, config_size);
623  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
624  bytestream2_put_byteu(&pb, 0x11); // stream type
625  bytestream2_put_ne24(&pb, 0); // buffer size
626  bytestream2_put_ne32(&pb, 0); // max bitrate
627  bytestream2_put_ne32(&pb, 0); // avg bitrate
628 
629  // decoder specific descriptor
630  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
631  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
632 
633  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
634 
635  // SLConfigDescriptor
636  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
637  bytestream2_put_byteu(&pb, 0x01); // length
638  bytestream2_put_byteu(&pb, 0x02); //
639 
640  s = bytestream2_size_p(&pb);
641 
642  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
643 
644  av_freep(&rw_extradata);
645  return data;
646 }
647 
648 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
649  void *buffer,
650  int size)
651 {
652  OSStatus status;
653  CMBlockBufferRef block_buf;
654  CMSampleBufferRef sample_buf;
655 
656  block_buf = NULL;
657  sample_buf = NULL;
658 
659  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
660  buffer, // memoryBlock
661  size, // blockLength
662  kCFAllocatorNull, // blockAllocator
663  NULL, // customBlockSource
664  0, // offsetToData
665  size, // dataLength
666  0, // flags
667  &block_buf);
668 
669  if (!status) {
670  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
671  block_buf, // dataBuffer
672  TRUE, // dataReady
673  0, // makeDataReadyCallback
674  0, // makeDataReadyRefcon
675  fmt_desc, // formatDescription
676  1, // numSamples
677  0, // numSampleTimingEntries
678  NULL, // sampleTimingArray
679  0, // numSampleSizeEntries
680  NULL, // sampleSizeArray
681  &sample_buf);
682  }
683 
684  if (block_buf)
685  CFRelease(block_buf);
686 
687  return sample_buf;
688 }
689 
690 static void videotoolbox_decoder_callback(void *opaque,
691  void *sourceFrameRefCon,
692  OSStatus status,
693  VTDecodeInfoFlags flags,
694  CVImageBufferRef image_buffer,
695  CMTime pts,
696  CMTime duration)
697 {
698  VTContext *vtctx = opaque;
699 
700  if (vtctx->frame) {
701  CVPixelBufferRelease(vtctx->frame);
702  vtctx->frame = NULL;
703  }
704 
705  if (!image_buffer) {
707  "vt decoder cb: output image buffer is null: %i\n", status);
708  return;
709  }
710 
711  vtctx->frame = CVPixelBufferRetain(image_buffer);
712 }
713 
714 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
715 {
716  OSStatus status;
717  CMSampleBufferRef sample_buf;
718  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
719  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
720 
721  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
722  vtctx->bitstream,
723  vtctx->bitstream_size);
724 
725  if (!sample_buf)
726  return -1;
727 
728  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
729  sample_buf,
730  0, // decodeFlags
731  NULL, // sourceFrameRefCon
732  0); // infoFlagsOut
733  if (status == noErr)
734  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
735 
736  CFRelease(sample_buf);
737 
738  return status;
739 }
740 
741 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
742  CFDictionaryRef decoder_spec,
743  int width,
744  int height)
745 {
746  CMFormatDescriptionRef cm_fmt_desc;
747  OSStatus status;
748 
749  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
750  codec_type,
751  width,
752  height,
753  decoder_spec, // Dictionary of extension
754  &cm_fmt_desc);
755 
756  if (status)
757  return NULL;
758 
759  return cm_fmt_desc;
760 }
761 
762 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
763  int height,
764  OSType pix_fmt)
765 {
766  CFMutableDictionaryRef buffer_attributes;
767  CFMutableDictionaryRef io_surface_properties;
768  CFNumberRef cv_pix_fmt;
769  CFNumberRef w;
770  CFNumberRef h;
771 
772  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
773  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
774  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
775 
776  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
777  4,
778  &kCFTypeDictionaryKeyCallBacks,
779  &kCFTypeDictionaryValueCallBacks);
780  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
781  0,
782  &kCFTypeDictionaryKeyCallBacks,
783  &kCFTypeDictionaryValueCallBacks);
784 
785  if (pix_fmt)
786  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
787  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
788  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
789  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
790 #if TARGET_OS_IPHONE
791  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
792 #else
793  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
794 #endif
795 
796  CFRelease(io_surface_properties);
797  CFRelease(cv_pix_fmt);
798  CFRelease(w);
799  CFRelease(h);
800 
801  return buffer_attributes;
802 }
803 
804 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
805  AVCodecContext *avctx)
806 {
807  CFMutableDictionaryRef avc_info;
808  CFDataRef data = NULL;
809 
810  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
811  0,
812  &kCFTypeDictionaryKeyCallBacks,
813  &kCFTypeDictionaryValueCallBacks);
814 
815  CFDictionarySetValue(config_info,
819  kCFBooleanTrue);
820 
821  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
822  1,
823  &kCFTypeDictionaryKeyCallBacks,
824  &kCFTypeDictionaryValueCallBacks);
825 
826  switch (codec_type) {
827  case kCMVideoCodecType_MPEG4Video :
828  if (avctx->extradata_size)
829  data = videotoolbox_esds_extradata_create(avctx);
830  if (data)
831  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
832  break;
833  case kCMVideoCodecType_H264 :
835  if (data)
836  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
837  break;
840  if (data)
841  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
842  break;
843 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
844  case kCMVideoCodecType_VP9 :
846  if (data)
847  CFDictionarySetValue(avc_info, CFSTR("vpcC"), data);
848  break;
849 #endif
850  default:
851  break;
852  }
853 
854  CFDictionarySetValue(config_info,
855  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
856  avc_info);
857 
858  if (data)
859  CFRelease(data);
860 
861  CFRelease(avc_info);
862  return config_info;
863 }
864 
865 static int videotoolbox_start(AVCodecContext *avctx)
866 {
867  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
868  OSStatus status;
869  VTDecompressionOutputCallbackRecord decoder_cb;
870  CFDictionaryRef decoder_spec;
871  CFDictionaryRef buf_attr;
872 
873  if (!videotoolbox) {
874  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
875  return -1;
876  }
877 
878  switch( avctx->codec_id ) {
879  case AV_CODEC_ID_H263 :
880  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
881  break;
882  case AV_CODEC_ID_H264 :
883  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
884  break;
885  case AV_CODEC_ID_HEVC :
886  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
887  break;
889  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
890  break;
892  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
893  break;
894  case AV_CODEC_ID_MPEG4 :
895  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
896  break;
897  case AV_CODEC_ID_PRORES :
898  switch (avctx->codec_tag) {
899  default:
900  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
901  // fall-through
902  case MKTAG('a','p','c','o'): // kCMVideoCodecType_AppleProRes422Proxy
903  case MKTAG('a','p','c','s'): // kCMVideoCodecType_AppleProRes422LT
904  case MKTAG('a','p','c','n'): // kCMVideoCodecType_AppleProRes422
905  case MKTAG('a','p','c','h'): // kCMVideoCodecType_AppleProRes422HQ
906  case MKTAG('a','p','4','h'): // kCMVideoCodecType_AppleProRes4444
907  case MKTAG('a','p','4','x'): // kCMVideoCodecType_AppleProRes4444XQ
908  videotoolbox->cm_codec_type = av_bswap32(avctx->codec_tag);
909  break;
910  }
911  break;
912  case AV_CODEC_ID_VP9 :
913  videotoolbox->cm_codec_type = kCMVideoCodecType_VP9;
914  break;
915  default :
916  break;
917  }
918 
919 #if defined(MAC_OS_X_VERSION_10_9) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9) && AV_HAS_BUILTIN(__builtin_available)
920  if (avctx->codec_id == AV_CODEC_ID_PRORES) {
921  if (__builtin_available(macOS 10.9, *)) {
922  VTRegisterProfessionalVideoWorkflowVideoDecoders();
923  }
924  }
925 #endif
926 
927 #if defined(MAC_OS_VERSION_11_0) && !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0) && AV_HAS_BUILTIN(__builtin_available)
928  if (__builtin_available(macOS 11.0, *)) {
929  VTRegisterSupplementalVideoDecoderIfAvailable(videotoolbox->cm_codec_type);
930  }
931 #endif
932 
933  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
934 
935  if (!decoder_spec) {
936  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
937  return -1;
938  }
939 
940  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
941  decoder_spec,
942  avctx->width,
943  avctx->height);
944  if (!videotoolbox->cm_fmt_desc) {
945  if (decoder_spec)
946  CFRelease(decoder_spec);
947 
948  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
949  return -1;
950  }
951 
952  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
953  avctx->height,
954  videotoolbox->cv_pix_fmt_type);
955 
956  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
957  decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data;
958 
959  status = VTDecompressionSessionCreate(NULL, // allocator
960  videotoolbox->cm_fmt_desc, // videoFormatDescription
961  decoder_spec, // videoDecoderSpecification
962  buf_attr, // destinationImageBufferAttributes
963  &decoder_cb, // outputCallback
964  &videotoolbox->session); // decompressionSessionOut
965 
966  if (decoder_spec)
967  CFRelease(decoder_spec);
968  if (buf_attr)
969  CFRelease(buf_attr);
970 
971  switch (status) {
972  case kVTVideoDecoderNotAvailableNowErr:
973  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
974  return AVERROR(ENOSYS);
975  case kVTVideoDecoderUnsupportedDataFormatErr:
976  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
977  return AVERROR(ENOSYS);
978  case kVTCouldNotFindVideoDecoderErr:
979  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
980  return AVERROR(ENOSYS);
981  case kVTVideoDecoderMalfunctionErr:
982  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
983  return AVERROR(EINVAL);
984  case kVTVideoDecoderBadDataErr:
985  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
986  return AVERROR_INVALIDDATA;
987  case 0:
988  return 0;
989  default:
990  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
991  return AVERROR_UNKNOWN;
992  }
993 }
994 
995 static const char *videotoolbox_error_string(OSStatus status)
996 {
997  switch (status) {
998  case kVTVideoDecoderBadDataErr:
999  return "bad data";
1000  case kVTVideoDecoderMalfunctionErr:
1001  return "decoder malfunction";
1002  case kVTInvalidSessionErr:
1003  return "invalid session";
1004  }
1005  return "unknown";
1006 }
1007 
1009 {
1010  OSStatus status;
1011  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
1012  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1013 
1014  if (vtctx->reconfig_needed == true) {
1015  vtctx->reconfig_needed = false;
1016  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
1017  videotoolbox_stop(avctx);
1018  if (videotoolbox_start(avctx) != 0) {
1019  return AVERROR_EXTERNAL;
1020  }
1021  }
1022 
1023  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
1024  return AVERROR_INVALIDDATA;
1025 
1026  status = videotoolbox_session_decode_frame(avctx);
1027  if (status != noErr) {
1028  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
1029  vtctx->reconfig_needed = true;
1030  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
1031  return AVERROR_UNKNOWN;
1032  }
1033 
1034  if (!vtctx->frame) {
1035  vtctx->reconfig_needed = true;
1036  return AVERROR_UNKNOWN;
1037  }
1038 
1039  return videotoolbox_buffer_create(avctx, frame);
1040 }
1041 
1042 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
1043 {
1044  H264Context *h = avctx->priv_data;
1045  AVFrame *frame = h->cur_pic_ptr->f;
1046  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1048  vtctx->bitstream_size = 0;
1049  return ret;
1050 }
1051 
1052 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
1053  const uint8_t *buffer,
1054  uint32_t size)
1055 {
1056  return 0;
1057 }
1058 
1059 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
1060  const uint8_t *buffer,
1061  uint32_t size)
1062 {
1064 }
1065 
1066 
1067 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
1068  int type,
1069  const uint8_t *buffer,
1070  uint32_t size)
1071 {
1073 }
1074 
1075 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
1076 {
1077  HEVCContext *h = avctx->priv_data;
1078  AVFrame *frame = h->ref->frame;
1079  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1080  int ret;
1081 
1082  h->output_frame->crop_right = 0;
1083  h->output_frame->crop_left = 0;
1084  h->output_frame->crop_top = 0;
1085  h->output_frame->crop_bottom = 0;
1086 
1088  vtctx->bitstream_size = 0;
1089  return ret;
1090 }
1091 
1092 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
1093  const uint8_t *buffer,
1094  uint32_t size)
1095 {
1096  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1097 
1098  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1099 }
1100 
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    /* No per-slice work: the full picture payload was already copied into
     * the bitstream buffer by videotoolbox_mpeg_start_frame(). */
    return 0;
}
1107 
1108 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1109 {
1110  MpegEncContext *s = avctx->priv_data;
1111  AVFrame *frame = s->current_picture_ptr->f;
1112 
1113  return ff_videotoolbox_common_end_frame(avctx, frame);
1114 }
1115 
static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    /* Nothing to do at frame start: the ProRes payload is copied into the
     * bitstream buffer by the decode_slice() callback. */
    return 0;
}
1122 
1123 static int videotoolbox_prores_decode_slice(AVCodecContext *avctx,
1124  const uint8_t *buffer,
1125  uint32_t size)
1126 {
1127  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1128 
1129  return ff_videotoolbox_buffer_copy(vtctx, buffer, size);
1130 }
1131 
1132 static int videotoolbox_prores_end_frame(AVCodecContext *avctx)
1133 {
1134  ProresContext *ctx = avctx->priv_data;
1135  AVFrame *frame = ctx->frame;
1136 
1137  return ff_videotoolbox_common_end_frame(avctx, frame);
1138 }
1139 
/* Pick the CVPixelBuffer-backed sw pixel format that best matches the
 * decoder's nominal format (avctx->sw_pix_fmt): preserve alpha, then bit
 * depth, then chroma subsampling, degrading gracefully when the running
 * OS lacks the corresponding kCVPixelFormatType (HAVE_* config checks).
 * NOTE: the order of the #if blocks is significant — the first matching
 * branch wins, so depth is tested before chroma layout. */
static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
    int depth;
    const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!descriptor)
        return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()


    if (descriptor->flags & AV_PIX_FMT_FLAG_ALPHA)
        return AV_PIX_FMT_AYUV64;

    depth = descriptor->comp[0].depth;

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR16BIPLANARVIDEORANGE
    /* >10-bit content: 16-bit biplanar, 4:4:4 vs 4:2:2 by chroma width. */
    if (depth > 10)
        return descriptor->log2_chroma_w == 0 ? AV_PIX_FMT_P416 : AV_PIX_FMT_P216;
#endif

#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR10BIPLANARVIDEORANGE
    /* 4:4:4 (no horizontal chroma subsampling). */
    if (descriptor->log2_chroma_w == 0) {
#if HAVE_KCVPIXELFORMATTYPE_444YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV24;
#endif
        return AV_PIX_FMT_P410;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR10BIPLANARVIDEORANGE
    /* 4:2:2 (no vertical chroma subsampling). */
    if (descriptor->log2_chroma_h == 0) {
#if HAVE_KCVPIXELFORMATTYPE_422YPCBCR8BIPLANARVIDEORANGE
        if (depth <= 8)
            return AV_PIX_FMT_NV16;
#endif
        return AV_PIX_FMT_P210;
    }
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
    /* 4:2:0, >8-bit. */
    if (depth > 8) {
        return AV_PIX_FMT_P010;
    }
#endif

    /* Fallback: 8-bit 4:2:0 biplanar, supported everywhere. */
    return AV_PIX_FMT_NV12;
}
1183 
1184 static AVVideotoolboxContext *videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1185  bool full_range)
1186 {
1187  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1188 
1189  if (ret) {
1190  OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1191  if (cv_pix_fmt_type == 0) {
1192  cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1193  }
1194  ret->cv_pix_fmt_type = cv_pix_fmt_type;
1195  }
1196 
1197  return ret;
1198 }
1199 
1201 {
1202  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1203  AVHWFramesContext *hw_frames;
1204  AVVTFramesContext *hw_ctx;
1205  int err;
1206  bool full_range;
1207 
1208  vtctx->logctx = avctx;
1209 
1210  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx &&
1211  avctx->hwaccel_context)
1212  return videotoolbox_start(avctx);
1213 
1214  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1215  av_log(avctx, AV_LOG_ERROR,
1216  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1217  return AVERROR(EINVAL);
1218  }
1219 
1220  vtctx->vt_ctx = videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
1221  if (!vtctx->vt_ctx) {
1222  err = AVERROR(ENOMEM);
1223  goto fail;
1224  }
1225 
1226  if (avctx->hw_frames_ctx) {
1227  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1228  } else {
1230  if (!avctx->hw_frames_ctx) {
1231  err = AVERROR(ENOMEM);
1232  goto fail;
1233  }
1234 
1235  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1236  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1237  hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1238  hw_frames->width = avctx->width;
1239  hw_frames->height = avctx->height;
1240  hw_ctx = hw_frames->hwctx;
1241  hw_ctx->color_range = avctx->color_range;
1242 
1243  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1244  if (err < 0) {
1245  av_buffer_unref(&avctx->hw_frames_ctx);
1246  goto fail;
1247  }
1248  }
1249 
1251  if (!vtctx->cached_hw_frames_ctx) {
1252  err = AVERROR(ENOMEM);
1253  goto fail;
1254  }
1255 
1257  vtctx->vt_ctx->cv_pix_fmt_type =
1259  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1260  const AVPixFmtDescriptor *attempted_format =
1261  av_pix_fmt_desc_get(hw_frames->sw_format);
1262  av_log(avctx, AV_LOG_ERROR,
1263  "Failed to map underlying FFmpeg pixel format %s (%s range) to "
1264  "a VideoToolbox format!\n",
1265  attempted_format ? attempted_format->name : "<unknown>",
1267  err = AVERROR(EINVAL);
1268  goto fail;
1269  }
1270 
1271  err = videotoolbox_start(avctx);
1272  if (err < 0)
1273  goto fail;
1274 
1275  return 0;
1276 
1277 fail:
1278  ff_videotoolbox_uninit(avctx);
1279  return err;
1280 }
1281 
1283  AVBufferRef *hw_frames_ctx)
1284 {
1285  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1286 
1287  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1288  frames_ctx->width = avctx->coded_width;
1289  frames_ctx->height = avctx->coded_height;
1290  frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
1291 
1292  return 0;
1293 }
1294 
1296  .p.name = "h263_videotoolbox",
1297  .p.type = AVMEDIA_TYPE_VIDEO,
1298  .p.id = AV_CODEC_ID_H263,
1299  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1300  .alloc_frame = ff_videotoolbox_alloc_frame,
1301  .start_frame = videotoolbox_mpeg_start_frame,
1302  .decode_slice = videotoolbox_mpeg_decode_slice,
1303  .end_frame = videotoolbox_mpeg_end_frame,
1304  .frame_params = ff_videotoolbox_frame_params,
1306  .uninit = ff_videotoolbox_uninit,
1307  .priv_data_size = sizeof(VTContext),
1308 };
1309 
1311  .p.name = "hevc_videotoolbox",
1312  .p.type = AVMEDIA_TYPE_VIDEO,
1313  .p.id = AV_CODEC_ID_HEVC,
1314  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1315  .alloc_frame = ff_videotoolbox_alloc_frame,
1316  .start_frame = videotoolbox_hevc_start_frame,
1317  .decode_slice = videotoolbox_hevc_decode_slice,
1318  .decode_params = videotoolbox_hevc_decode_params,
1319  .end_frame = videotoolbox_hevc_end_frame,
1320  .frame_params = ff_videotoolbox_frame_params,
1322  .uninit = ff_videotoolbox_uninit,
1323  .priv_data_size = sizeof(VTContext),
1324 };
1325 
1327  .p.name = "h264_videotoolbox",
1328  .p.type = AVMEDIA_TYPE_VIDEO,
1329  .p.id = AV_CODEC_ID_H264,
1330  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1331  .alloc_frame = ff_videotoolbox_alloc_frame,
1332  .start_frame = ff_videotoolbox_h264_start_frame,
1333  .decode_slice = ff_videotoolbox_h264_decode_slice,
1334  .decode_params = videotoolbox_h264_decode_params,
1335  .end_frame = videotoolbox_h264_end_frame,
1336  .frame_params = ff_videotoolbox_frame_params,
1338  .uninit = ff_videotoolbox_uninit,
1339  .priv_data_size = sizeof(VTContext),
1340 };
1341 
1343  .p.name = "mpeg1_videotoolbox",
1344  .p.type = AVMEDIA_TYPE_VIDEO,
1345  .p.id = AV_CODEC_ID_MPEG1VIDEO,
1346  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1347  .alloc_frame = ff_videotoolbox_alloc_frame,
1348  .start_frame = videotoolbox_mpeg_start_frame,
1349  .decode_slice = videotoolbox_mpeg_decode_slice,
1350  .end_frame = videotoolbox_mpeg_end_frame,
1351  .frame_params = ff_videotoolbox_frame_params,
1353  .uninit = ff_videotoolbox_uninit,
1354  .priv_data_size = sizeof(VTContext),
1355 };
1356 
1358  .p.name = "mpeg2_videotoolbox",
1359  .p.type = AVMEDIA_TYPE_VIDEO,
1360  .p.id = AV_CODEC_ID_MPEG2VIDEO,
1361  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1362  .alloc_frame = ff_videotoolbox_alloc_frame,
1363  .start_frame = videotoolbox_mpeg_start_frame,
1364  .decode_slice = videotoolbox_mpeg_decode_slice,
1365  .end_frame = videotoolbox_mpeg_end_frame,
1366  .frame_params = ff_videotoolbox_frame_params,
1368  .uninit = ff_videotoolbox_uninit,
1369  .priv_data_size = sizeof(VTContext),
1370 };
1371 
1373  .p.name = "mpeg4_videotoolbox",
1374  .p.type = AVMEDIA_TYPE_VIDEO,
1375  .p.id = AV_CODEC_ID_MPEG4,
1376  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1377  .alloc_frame = ff_videotoolbox_alloc_frame,
1378  .start_frame = videotoolbox_mpeg_start_frame,
1379  .decode_slice = videotoolbox_mpeg_decode_slice,
1380  .end_frame = videotoolbox_mpeg_end_frame,
1381  .frame_params = ff_videotoolbox_frame_params,
1383  .uninit = ff_videotoolbox_uninit,
1384  .priv_data_size = sizeof(VTContext),
1385 };
1386 
1388  .p.name = "prores_videotoolbox",
1389  .p.type = AVMEDIA_TYPE_VIDEO,
1390  .p.id = AV_CODEC_ID_PRORES,
1391  .p.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1392  .alloc_frame = ff_videotoolbox_alloc_frame,
1393  .start_frame = videotoolbox_prores_start_frame,
1394  .decode_slice = videotoolbox_prores_decode_slice,
1395  .end_frame = videotoolbox_prores_end_frame,
1396  .frame_params = ff_videotoolbox_frame_params,
1398  .uninit = ff_videotoolbox_uninit,
1399  .priv_data_size = sizeof(VTContext),
1400 };
1401 
1402 #endif /* CONFIG_VIDEOTOOLBOX */
videotoolbox_buffer_release
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
Definition: videotoolbox.c:66
AVVideotoolboxContext::cm_codec_type
int cm_codec_type
CoreMedia codec type that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:78
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1451
kCMVideoCodecType_VP9
@ kCMVideoCodecType_VP9
Definition: videotoolbox.c:56
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
APPEND_PS
#define APPEND_PS(T, t)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_videotoolbox_common_end_frame
int ff_videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_videotoolbox_uninit
int ff_videotoolbox_uninit(AVCodecContext *avctx)
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:197
FFHWAccel::p
AVHWAccel p
The public AVHWAccel.
Definition: hwaccel_internal.h:38
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
FrameDecodeData
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:33
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:322
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVPixFmtDescriptor::name
const char * name
Definition: pixdesc.h:70
b
#define b
Definition: input.c:41
av_vt_pixbuf_set_attachments
int av_vt_pixbuf_set_attachments(void *log_ctx, CVPixelBufferRef pixbuf, const AVFrame *src)
Definition: hwcontext_videotoolbox.c:603
data
const char data[16]
Definition: mxf.c:148
ProresContext
Definition: proresdec.h:43
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
bytestream2_size_p
static av_always_inline int bytestream2_size_p(PutByteContext *p)
Definition: bytestream.h:207
AV_W8
#define AV_W8(p, v)
Definition: videotoolbox.c:156
PTLCommon::profile_space
uint8_t profile_space
Definition: hevc_ps.h:124
COUNT_SIZE_PS
#define COUNT_SIZE_PS(T, t)
mpegvideo.h
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_mpeg2_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg2_videotoolbox_hwaccel
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:217
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVVideotoolboxContext
This struct holds all the information that needs to be passed between the caller and libavcodec for i...
Definition: videotoolbox.h:57
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
PTLCommon::profile_compatibility_flag
uint8_t profile_compatibility_flag[32]
Definition: hevc_ps.h:127
escape_ps
static int escape_ps(uint8_t *dst, const uint8_t *src, int src_size)
Definition: videotoolbox.c:158
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
PTLCommon::progressive_source_flag
uint8_t progressive_source_flag
Definition: hevc_ps.h:128
ff_hevc_videotoolbox_hwaccel
const struct FFHWAccel ff_hevc_videotoolbox_hwaccel
FFHWAccel
Definition: hwaccel_internal.h:34
PTLCommon::interlaced_source_flag
uint8_t interlaced_source_flag
Definition: hevc_ps.h:129
ff_videotoolbox_avcc_extradata_create
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:187
fail
#define fail()
Definition: checkasm.h:183
ff_h263_videotoolbox_hwaccel
const struct FFHWAccel ff_h263_videotoolbox_hwaccel
proresdec.h
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:644
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
VTContext::allocated_size
int allocated_size
Definition: vt_internal.h:33
kCMVideoCodecType_HEVC
@ kCMVideoCodecType_HEVC
Definition: videotoolbox.c:52
ff_videotoolbox_common_init
int ff_videotoolbox_common_init(AVCodecContext *avctx)
PTLCommon::frame_only_constraint_flag
uint8_t frame_only_constraint_flag
Definition: hevc_ps.h:131
videotoolbox.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
VTContext::bitstream
uint8_t * bitstream
Definition: vt_internal.h:27
ff_videotoolbox_h264_start_frame
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:389
kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:48
AVHWFramesContext::height
int height
Definition: hwcontext.h:217
duration
int64_t duration
Definition: movenc.c:65
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
bytestream2_put_ne24
#define bytestream2_put_ne24
Definition: bytestream.h:128
full_range
bool full_range
Definition: hwcontext_videotoolbox.c:46
av_fast_realloc
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:497
width
#define width
vt_internal.h
PTLCommon
Definition: hevc_ps.h:123
s
#define s(width, name)
Definition: cbs_vp9.c:198
VTHWFrame
Definition: videotoolbox.c:61
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
bytestream2_put_buffer
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:286
ff_mpeg1_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg1_videotoolbox_hwaccel
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
ff_videotoolbox_vpcc_extradata_create
CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox_vp9.c:65
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:49
decode.h
PTLCommon::non_packed_constraint_flag
uint8_t non_packed_constraint_flag
Definition: hevc_ps.h:130
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
PTLCommon::profile_idc
uint8_t profile_idc
Definition: hevc_ps.h:126
AVVTFramesContext
Definition: hwcontext_videotoolbox.h:45
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
PTLCommon::tier_flag
uint8_t tier_flag
Definition: hevc_ps.h:125
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
if
if(ret)
Definition: filter_design.txt:179
VTContext::bitstream_size
int bitstream_size
Definition: vt_internal.h:30
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3281
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:210
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
hwaccel_internal.h
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:403
AVVTFramesContext::color_range
enum AVColorRange color_range
Definition: hwcontext_videotoolbox.h:46
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:126
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:480
V
#define V
Definition: avdct.c:31
AV_PIX_FMT_P410
#define AV_PIX_FMT_P410
Definition: pixfmt.h:540
AVVideotoolboxContext::session
VTDecompressionSessionRef session
Videotoolbox decompression session object.
Definition: videotoolbox.h:61
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_videotoolbox_frame_params
int ff_videotoolbox_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
ff_videotoolbox_h264_decode_slice
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:449
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
videotoolbox_common_decode_slice
static int videotoolbox_common_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:426
VTHWFrame::pixbuf
CVPixelBufferRef pixbuf
Definition: videotoolbox.c:62
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:417
PutByteContext
Definition: bytestream.h:37
hwcontext_videotoolbox.h
ff_prores_videotoolbox_hwaccel
const struct FFHWAccel ff_prores_videotoolbox_hwaccel
ff_videotoolbox_hvcc_extradata_create
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
Definition: videotoolbox.c:232
hevcdec.h
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:374
FrameDecodeData::post_process
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:44
AVCodecInternal::hwaccel_priv_data
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:123
P
#define P
av_bswap32
#define av_bswap32
Definition: bswap.h:28
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
VUI
Definition: hevc_ps.h:94
AV_PIX_FMT_AYUV64
#define AV_PIX_FMT_AYUV64
Definition: pixfmt.h:527
AVVideotoolboxContext::cm_fmt_desc
CMVideoFormatDescriptionRef cm_fmt_desc
CoreMedia Format Description that Videotoolbox will use to create the decompression session.
Definition: videotoolbox.h:73
AV_PIX_FMT_NV16
@ AV_PIX_FMT_NV16
interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:198
height
#define height
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AV_PIX_FMT_P216
#define AV_PIX_FMT_P216
Definition: pixfmt.h:543
AV_PIX_FMT_P210
#define AV_PIX_FMT_P210
Definition: pixfmt.h:539
VTContext
Definition: vt_internal.h:25
AVHWAccel::name
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2095
kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
Definition: videotoolbox.c:45
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
h264dec.h
H264Context
H264Context.
Definition: h264dec.h:332
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
AV_PIX_FMT_NV24
@ AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:371
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
VTContext::frame
CVImageBufferRef frame
Definition: vt_internal.h:36
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1497
bytestream2_put_ne32
#define bytestream2_put_ne32
Definition: bytestream.h:129
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1475
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:115
bytestream2_put_ne16
#define bytestream2_put_ne16
Definition: bytestream.h:127
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_videotoolbox_alloc_frame
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: videotoolbox.c:126
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:150
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING
Definition: videotoolbox.c:59
av_map_videotoolbox_format_to_pixfmt
enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat.
Definition: hwcontext_videotoolbox.c:142
AVCodecContext
main external API structure.
Definition: avcodec.h:445
status
ov_status_e status
Definition: dnn_backend_openvino.c:101
VTContext::vt_ctx
struct AVVideotoolboxContext * vt_ctx
Definition: vt_internal.h:43
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
ff_mpeg4_videotoolbox_hwaccel
const struct FFHWAccel ff_mpeg4_videotoolbox_hwaccel
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
HEVCContext
Definition: hevcdec.h:444
PTLCommon::level_idc
uint8_t level_idc
Definition: hevc_ps.h:143
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
pps
uint64_t pps
Definition: dovi_rpuenc.c:35
videotoolbox_postproc_frame
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
Definition: videotoolbox.c:95
VTContext::logctx
void * logctx
Definition: vt_internal.h:49
VTHWFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: videotoolbox.c:63
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:528
VUI::min_spatial_segmentation_idc
int min_spatial_segmentation_idc
Definition: hevc_ps.h:116
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:633
VTContext::cached_hw_frames_ctx
struct AVBufferRef * cached_hw_frames_ctx
Definition: vt_internal.h:39
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
avutil.h
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
HEVCVPS
Definition: hevc_ps.h:154
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1551
HEVCSPS
Definition: hevc_ps.h:188
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
HEVCPPS
Definition: hevc_ps.h:305
ff_videotoolbox_buffer_copy
int ff_videotoolbox_buffer_copy(VTContext *vtctx, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:75
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AV_PIX_FMT_P416
#define AV_PIX_FMT_P416
Definition: pixfmt.h:544
ff_h264_videotoolbox_hwaccel
const struct FFHWAccel ff_h264_videotoolbox_hwaccel
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVVideotoolboxContext::cv_pix_fmt_type
OSType cv_pix_fmt_type
CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
Definition: videotoolbox.h:68
av_map_videotoolbox_format_from_pixfmt2
uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
Same as av_map_videotoolbox_format_from_pixfmt function, but can map and return full range pixel form...
Definition: hwcontext_videotoolbox.c:176
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
bytestream.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
hwcontext.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:664
videotoolbox_h264_decode_params
static int videotoolbox_h264_decode_params(AVCodecContext *avctx, int type, const uint8_t *buffer, uint32_t size)
Definition: videotoolbox.c:403
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
VTContext::reconfig_needed
bool reconfig_needed
Definition: vt_internal.h:47
VTContext::sps
uint8_t sps[3]
Definition: vt_internal.h:46
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
AV_CODEC_ID_PRORES
@ AV_CODEC_ID_PRORES
Definition: codec_id.h:200