FFmpeg
hwcontext_videotoolbox.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include <stdint.h>
#include <string.h>

#include <VideoToolbox/VideoToolbox.h>

#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_videotoolbox.h"
#include "mem.h"
#include "pixfmt.h"
#include "pixdesc.h"

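/*
 * Mapping between CoreVideo pixel formats and the equivalent AVPixelFormat.
 * The full_range flag distinguishes video-range from full-range variants of
 * the same AVPixelFormat (e.g. NV12).  The lookup helpers below do a simple
 * linear scan of this table.
 */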
static const struct {
    uint32_t cv_fmt;
    bool full_range;
    enum AVPixelFormat pix_fmt;
} cv_pix_fmts[] = {
    { kCVPixelFormatType_420YpCbCr8Planar,              false, AV_PIX_FMT_YUV420P },
    { kCVPixelFormatType_422YpCbCr8,                    false, AV_PIX_FMT_UYVY422 },
    { kCVPixelFormatType_32BGRA,                        false, AV_PIX_FMT_BGRA },
#ifdef kCFCoreFoundationVersionNumber10_7
    { kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,  false, AV_PIX_FMT_NV12 },
    { kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,   true,  AV_PIX_FMT_NV12 },
#endif
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
    { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange, false, AV_PIX_FMT_P010 },
    { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange,  true,  AV_PIX_FMT_P010 },
#endif
};

enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(cv_pix_fmts); i++) {
        if (cv_pix_fmts[i].cv_fmt == cv_fmt)
            return cv_pix_fmts[i].pix_fmt;
    }
    return AV_PIX_FMT_NONE;
}

uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt)
{
    return av_map_videotoolbox_format_from_pixfmt2(pix_fmt, false);
}

uint32_t av_map_videotoolbox_format_from_pixfmt2(enum AVPixelFormat pix_fmt, bool full_range)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(cv_pix_fmts); i++) {
        if (cv_pix_fmts[i].pix_fmt == pix_fmt && cv_pix_fmts[i].full_range == full_range)
            return cv_pix_fmts[i].cv_fmt;
    }
    return 0;
}

static int vt_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    frame->data[3] = frame->buf[0]->data;
    frame->format  = AV_PIX_FMT_VIDEOTOOLBOX;
    frame->width   = ctx->width;
    frame->height  = ctx->height;

    return 0;
}

static int vt_transfer_get_formats(AVHWFramesContext *ctx,
                                   enum AVHWFrameTransferDirection dir,
                                   enum AVPixelFormat **formats)
{
    enum AVPixelFormat *fmts = av_malloc_array(2, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    fmts[0] = ctx->sw_format;
    fmts[1] = AV_PIX_FMT_NONE;

    *formats = fmts;
    return 0;
}

static void vt_unmap(AVHWFramesContext *ctx, HWMapDescriptor *hwmap)
{
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)hwmap->source->data[3];

    CVPixelBufferUnlockBaseAddress(pixbuf, (uintptr_t)hwmap->priv);
}

static int vt_map_frame(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src,
                        int flags)
{
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)src->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint32_t map_flags = 0;
    int ret;
    int i;
    enum AVPixelFormat format;

    format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    if (dst->format != format) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported or mismatching pixel format: %s\n",
               av_fourcc2str(pixel_format));
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferGetWidth(pixbuf)  != ctx->width ||
        CVPixelBufferGetHeight(pixbuf) != ctx->height) {
        av_log(ctx, AV_LOG_ERROR, "Inconsistent frame dimensions.\n");
        return AVERROR_UNKNOWN;
    }

    if (flags == AV_HWFRAME_MAP_READ)
        map_flags = kCVPixelBufferLock_ReadOnly;

    err = CVPixelBufferLockBaseAddress(pixbuf, map_flags);
    if (err != kCVReturnSuccess) {
        av_log(ctx, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {
        int planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            dst->data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            dst->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        dst->data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        dst->linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    ret = ff_hwframe_map_create(src->hw_frames_ctx, dst, src, vt_unmap,
                                (void *)(uintptr_t)map_flags);
    if (ret < 0)
        goto unlock;

    return 0;

unlock:
    CVPixelBufferUnlockBaseAddress(pixbuf, map_flags);
    return ret;
}
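
/*
 * The generic download/upload paths below reuse vt_map_frame(): the
 * CVPixelBuffer is mapped into CPU-accessible memory and av_frame_copy()
 * moves the data between the mapped view and the plain software frame.
 */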
static int vt_transfer_data_from(AVHWFramesContext *hwfc,
                                 AVFrame *dst, const AVFrame *src)
{
    AVFrame *map;
    int err;

    if (dst->width > hwfc->width || dst->height > hwfc->height)
        return AVERROR(EINVAL);

    map = av_frame_alloc();
    if (!map)
        return AVERROR(ENOMEM);
    map->format = dst->format;

    err = vt_map_frame(hwfc, map, src, AV_HWFRAME_MAP_READ);
    if (err)
        goto fail;

    map->width  = dst->width;
    map->height = dst->height;

    err = av_frame_copy(dst, map);
    if (err)
        goto fail;

    err = 0;
fail:
    av_frame_free(&map);
    return err;
}

static int vt_transfer_data_to(AVHWFramesContext *hwfc,
                               AVFrame *dst, const AVFrame *src)
{
    AVFrame *map;
    int err;

    if (src->width > hwfc->width || src->height > hwfc->height)
        return AVERROR(EINVAL);

    map = av_frame_alloc();
    if (!map)
        return AVERROR(ENOMEM);
    map->format = src->format;

    err = vt_map_frame(hwfc, map, dst, AV_HWFRAME_MAP_WRITE | AV_HWFRAME_MAP_OVERWRITE);
    if (err)
        goto fail;

    map->width  = src->width;
    map->height = src->height;

    err = av_frame_copy(map, src);
    if (err)
        goto fail;

    err = 0;
fail:
    av_frame_free(&map);
    return err;
}

static int vt_device_create(AVHWDeviceContext *ctx, const char *device,
                            AVDictionary *opts, int flags)
{
    if (device && device[0]) {
        av_log(ctx, AV_LOG_ERROR, "Device selection unsupported.\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}

const HWContextType ff_hwcontext_type_videotoolbox = {
    .type                 = AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
    .name                 = "videotoolbox",

    .device_create        = vt_device_create,
    .frames_get_buffer    = vt_get_buffer,
    .transfer_get_formats = vt_transfer_get_formats,
    .transfer_data_to     = vt_transfer_data_to,
    .transfer_data_from   = vt_transfer_data_from,

    .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VIDEOTOOLBOX, AV_PIX_FMT_NONE },
};
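
The following is an editor's sketch, not part of hwcontext_videotoolbox.c: it shows how this backend is normally reached through libavutil's public hwcontext API. av_hwdevice_ctx_create() with AV_HWDEVICE_TYPE_VIDEOTOOLBOX ends up in vt_device_create() above, and av_hwframe_transfer_data() on a decoded AV_PIX_FMT_VIDEOTOOLBOX frame ends up in vt_transfer_data_from() via vt_map_frame(). The function names and the minimal error handling are illustrative only.

#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

/* Create a Videotoolbox device context.  The device string must be NULL or
 * empty; vt_device_create() rejects anything else. */
AVBufferRef *example_create_vt_device(void)
{
    AVBufferRef *device_ref = NULL;
    if (av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                               NULL, NULL, 0) < 0)
        return NULL;
    return device_ref;
}

/* Download a decoded Videotoolbox frame (AV_PIX_FMT_VIDEOTOOLBOX, with the
 * CVPixelBufferRef stored in data[3]) into a newly allocated software frame. */
int example_download_vt_frame(AVFrame **out, const AVFrame *hw_frame)
{
    AVFrame *sw_frame = av_frame_alloc();
    int ret;

    if (!sw_frame)
        return AVERROR(ENOMEM);

    /* sw_frame->format is left unset, so the generic transfer code picks the
     * first format reported by vt_transfer_get_formats(), i.e. sw_format. */
    ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);
    if (ret < 0) {
        av_frame_free(&sw_frame);
        return ret;
    }

    *out = sw_frame;
    return 0;
}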