/* DeckLink output device (libavdevice) -- excerpts from the source listing; elided code is marked with "..." comments. */

#include <DeckLinkAPI.h>

#include "libklvanc/vanc.h"
#include "libklvanc/vanc-lines.h"
#include "libklvanc/pixels.h"
    /* decklink_frame: geometry and pixel format reported to the DeckLink API */
    virtual long STDMETHODCALLTYPE GetRowBytes(void)
        { /* ... */ return ((GetWidth() + 47) / 48) * 128; }  /* v210: 48 pixels pack into 128 bytes */
    virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return bmdFormat8BitYUV;   /* wrapped UYVY AVFrames */
        return bmdFormat10BitYUV;      /* V210 packets */
    }
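    /* Note (added worked example, not part of the listing): for a 1920-pixel-wide line
     * the v210 row size above is ((1920 + 47) / 48) * 128 = 40 * 128 = 5120 bytes,
     * i.e. 40 groups of 48 pixels packed into 128 bytes each. */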
    virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags(void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
        return bmdFrameFlagDefault;
    }
    virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
    virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
    {
        /* ... */
    }
    /* ... */
    virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)
    {
        /* ... */
    }
    /* ... */
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
        /* In ScheduledFrameCompleted(): release the finished frame and free a buffer slot */
        if (frame->_avpacket)
            av_packet_unref(frame->_avpacket);
        /* ... */
        ctx->frames_buffer_available_spots++;
    /* Scheduled-playback callback: minimal COM boilerplate */
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG   STDMETHODCALLTYPE AddRef(void)                            { return 1; }
    virtual ULONG   STDMETHODCALLTYPE Release(void)                           { return 1; }

    /* In decklink_setup_video(): validate the input pixel format / codec and pick the
       DeckLink raw format */
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " Only AV_PIX_FMT_UYVY422 is supported.\n");
        /* ... */
        ctx->raw_format = bmdFormat8BitYUV;
        /* ... */
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n");
        /* ... */
        ctx->raw_format = bmdFormat10BitYUV;
        /* ... */
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " Check available formats with -list_formats 1.\n");
    /* Enable video output, preferring a VANC-capable mode and falling back to the default */
    if (ctx->supports_vanc &&
        ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputVANC) != S_OK) {
        /* ... */
        ctx->supports_vanc = 0;
    }
    if (!ctx->supports_vanc &&
        ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK) {
        /* ... */
    }
    /* ... */
    ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);
    ctx->frames_preroll /= 1000;
    /* ... */
    /* Buffer twice as many frames as the preroll, capped at 60 */
    ctx->frames_buffer = ctx->frames_preroll * 2;
    ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
    /* ... */
    ctx->frames_buffer_available_spots = ctx->frames_buffer;
    /* ... */
    av_log(avctx, AV_LOG_DEBUG, /* ... */
           avctx->url, ctx->frames_preroll, ctx->frames_buffer);
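    /* Note (added sketch, values assumed rather than taken from the listing): with, say,
     * 25 preroll frames, frames_buffer = FFMIN(25 * 2, 60) = 50, so up to 50 video frames
     * can be scheduled before the video writer blocks waiting for ScheduledFrameCompleted()
     * to free a slot via frames_buffer_available_spots. */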
    /* In decklink_setup_audio(): only 48 kHz, 2/8/16-channel PCM_S16LE or AC-3 is accepted */
    if (c->sample_rate != 48000) {
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " Only 48kHz is supported.\n");
        /* ... */
    }
    if (c->ch_layout.nb_channels != 2 &&
        c->ch_layout.nb_channels != 8 &&
        c->ch_layout.nb_channels != 16) {
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " Only 2, 8 or 16 channels are supported.\n");
        /* ... */
    }
    /* ... */
    ctx->channels = c->ch_layout.nb_channels;
    /* ... */
    av_log(avctx, AV_LOG_ERROR, /* ... */
           " Only PCM_S16LE and AC-3 are supported.\n");
    if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
                                    bmdAudioSampleType16bitInteger,
                                    ctx->channels,
                                    bmdAudioOutputStreamTimestamped) != S_OK) {
        /* ... */
    }
    /* ... */
    if (ctx->dlo->BeginAudioPreroll() != S_OK) {
        /* ... */
    }
static int create_s337_payload(AVPacket *pkt, uint8_t **outbuf, int *outsize)
{
    uint16_t bitcount = pkt->size * 8;
    uint8_t *s337_payload;
    PutByteContext pb;
    /* ... */

    s337_payload = (uint8_t *) av_malloc(payload_size);
    if (s337_payload == NULL)
        return AVERROR(ENOMEM);

    bytestream2_init_writer(&pb, s337_payload, payload_size);
    bytestream2_put_le16u(&pb, 0xf872);   /* SMPTE 337 preamble Pa */
    bytestream2_put_le16u(&pb, 0x4e1f);   /* SMPTE 337 preamble Pb */
    bytestream2_put_le16u(&pb, 0x0001);   /* Pc: burst info, data type 1 (AC-3) */
    bytestream2_put_le16u(&pb, bitcount); /* Pd: payload length in bits */
    /* ... */
    bytestream2_put_le16u(&pb, 0);        /* zero padding */
    /* ... */
    *outsize = payload_size;
    *outbuf  = s337_payload;
    return 0;
}
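/* Note (added sketch): layout of the burst written above, as 16-bit little-endian words.
 * The copy of the AC-3 syncframe itself and the exact amount of zero padding are elided
 * in the listing.
 *
 *   Pa  0xF872      preamble sync word
 *   Pb  0x4E1F      preamble sync word
 *   Pc  0x0001      burst info: data type 1 = AC-3 (SMPTE ST 338)
 *   Pd  bitcount    payload length in bits (pkt->size * 8)
 *   ... AC-3 syncframe bytes, then zero words up to payload_size
 */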
    /* In ff_decklink_write_trailer(): stop scheduled playback and tear everything down */
    if (ctx->playback_started) {
        BMDTimeValue actual;
        ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
                                        &actual, ctx->bmd_tb_den);
        ctx->dlo->DisableVideoOutput();
        /* ... */
        ctx->dlo->DisableAudioOutput();
    }
    /* ... */
    if (ctx->output_callback)
        delete ctx->output_callback;
    /* ... */
    klvanc_context_destroy(ctx->vanc_ctx);
static int construct_cc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                        AVPacket *pkt, struct klvanc_line_set_s *vanc_lines)
{
    struct klvanc_packet_eia_708b_s *cdp;
    uint16_t *cdp_words;
    uint16_t len;
    int ret, i;
    /* ... */

    ret = klvanc_create_eia708_cdp(&cdp);
    /* ... */

    ret = klvanc_set_framerate_EIA_708B(cdp, ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, /* ... */
               ctx->bmd_tb_num, ctx->bmd_tb_den);
        klvanc_destroy_eia708_cdp(cdp);
        /* ... */
    }

    if (cc_count > KLVANC_MAX_CC_COUNT) {
        /* ... */
        cc_count = KLVANC_MAX_CC_COUNT;
    }

    /* Copy the A53 caption triplets into the CDP */
    cdp->header.ccdata_present = 1;
    cdp->header.caption_service_active = 1;
    cdp->ccdata.cc_count = cc_count;
    for (i = 0; i < cc_count; i++) {
        if (data[3*i] & 0x04)
            cdp->ccdata.cc[i].cc_valid = 1;
        cdp->ccdata.cc[i].cc_type    = data[3*i] & 0x03;
        cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];
        cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];
    }

    klvanc_finalize_EIA_708B(cdp, ctx->cdp_sequence_num++);
    ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);
    klvanc_destroy_eia708_cdp(cdp);
    /* ... */

    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
    /* ... */
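/* Note (added sketch): the data / cc_count used above come from A53 closed-caption side
 * data attached to the packet; that lookup is elided in the listing. This illustrative
 * helper (not part of the file) shows the shape of it: */
static const uint8_t *get_a53_cc(AVPacket *pkt, uint8_t *cc_count)
{
    size_t size;
    uint8_t *data = av_packet_get_side_data(pkt, AV_PKT_DATA_A53_CC, &size);
    if (!data)
        return NULL;            /* no captions on this packet */
    *cc_count = size / 3;       /* A53 CC side data is cc_count 3-byte triplets */
    return data;
}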
static int construct_afd(AVFormatContext *avctx, struct decklink_ctx *ctx,
                         AVPacket *pkt, struct klvanc_line_set_s *vanc_lines,
                         AVStream *st)
{
    struct klvanc_packet_afd_s *afd = NULL;
    uint16_t *afd_words = NULL;
    uint16_t len;
    /* ... */
    int f1_line = 12, f2_line = 0, ret;
    /* ... */

    ret = klvanc_create_AFD(&afd);
    /* ... */

    ret = klvanc_set_AFD_val(afd, data[0]);
    if (ret) {
        /* ... */
        klvanc_destroy_AFD(afd);
        /* ... */
    }

    /* Pick the signalled aspect ratio */
    /* ... */
        afd->aspectRatio = ASPECT_16x9;
    /* ... */
        afd->aspectRatio = ASPECT_4x3;

    ret = klvanc_convert_AFD_to_words(afd, &afd_words, &len);
    /* ... */

    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, afd_words, len, f1_line, 0);
    /* ... */

    /* For interlaced modes, also insert the AFD packet on the corresponding field-2 line */
    switch (ctx->bmd_mode) {
    /* ... */
    case bmdModeNTSC2398:
        f2_line = 273 - 10 + f1_line;
        break;
    /* ... */
        f2_line = 319 - 6 + f1_line;
        break;
    case bmdModeHD1080i50:
    case bmdModeHD1080i5994:
    case bmdModeHD1080i6000:
        f2_line = 569 - 7 + f1_line;
        break;
    /* ... */
    }

    /* ... */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, afd_words, len, f2_line, 0);
    /* ... */

    klvanc_destroy_AFD(afd);
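    /* Note (added worked example): with the f1_line of 12 used above, the field-2
     * insertion line works out to:
     *   NTSC modes:          273 - 10 + 12 = 275
     *   625-line SD branch:  319 -  6 + 12 = 325
     *   1080i modes:         569 -  7 + 12 = 574
     * i.e. the same AFD payload is repeated at the matching VANC line of field 2. */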
static int decklink_construct_vanc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                                   AVPacket *pkt, decklink_frame *frame, AVStream *st)
{
    struct klvanc_line_set_s vanc_lines = { 0 };
    int ret = 0, i;

    if (!ctx->supports_vanc)
        return 0;

    parse_608subs(avctx, ctx, pkt);
    construct_cc(avctx, ctx, pkt, &vanc_lines);
    construct_afd(avctx, ctx, pkt, &vanc_lines, st);

    /* Drain any queued VANC data packets that are due for this video frame */
    /* ... */
        if (pts > ctx->last_pts) {
            /* this VANC packet belongs to a later frame; leave it queued */
            /* ... */
        }
        /* ... */
        if (vanc_pkt.pts + 1 < ctx->last_pts) {
            /* stale VANC packet; drop it */
            /* ... */
        }
        /* ... */
            /* SMPTE 2038 packets: convert each line to VANC words and insert it */
            struct klvanc_smpte2038_anc_data_packet_s *pkt_2038 = NULL;

            klvanc_smpte2038_parse_pes_payload(vanc_pkt.data, vanc_pkt.size, &pkt_2038);
            if (pkt_2038 == NULL) {
                /* ... */
            }
            for (int i = 0; i < pkt_2038->lineCount; i++) {
                struct klvanc_smpte2038_anc_data_line_s *l = &pkt_2038->lines[i];
                uint16_t *vancWords = NULL;
                uint16_t vancWordCount;

                if (klvanc_smpte2038_convert_line_to_words(l, &vancWords,
                                                           &vancWordCount) < 0)
                    /* ... */

                ret = klvanc_line_insert(ctx->vanc_ctx, &vanc_lines, vancWords,
                                         vancWordCount, l->line_number, 0);
                /* ... */
            }
            klvanc_smpte2038_anc_data_packet_free(pkt_2038);
    /* Attach the assembled VANC lines to the output frame's ancillary buffer */
    IDeckLinkVideoFrameAncillary *vanc;
    int result = ctx->dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc);
    /* ... */

    for (i = 0; i < vanc_lines.num_lines; i++) {
        struct klvanc_line_s *line = vanc_lines.lines[i];
        void *buf;
        int real_line;
        /* ... */
        real_line = line->line_number;

        result = vanc->GetBufferForVerticalBlankingLine(real_line, &buf);
        /* ... */
        result = klvanc_generate_vanc_line_v210(ctx->vanc_ctx, line, (uint8_t *) buf,
                                                ctx->bmd_width);
        /* ... */
    }
    /* ... */

    for (i = 0; i < vanc_lines.num_lines; i++)
        klvanc_line_free(vanc_lines.lines[i]);
    /* In decklink_write_video_packet(): the wrapped AVFrame must match the configured mode */
    if (/* ... */
        tmp->width  != ctx->bmd_width ||
        tmp->height != ctx->bmd_height) {
        /* ... */
    }
    if (decklink_construct_vanc(avctx, ctx, pkt, frame, st)) {
        /* ... */
    }

    /* Block until a slot in the scheduling buffer frees up */
    pthread_mutex_lock(&ctx->mutex);
    while (ctx->frames_buffer_available_spots == 0) {
        pthread_cond_wait(&ctx->cond, &ctx->mutex);
    }
    ctx->frames_buffer_available_spots--;
    pthread_mutex_unlock(&ctx->mutex);
    hr = ctx->dlo->ScheduleVideoFrame((class IDeckLinkVideoFrame *) frame,
                                      pkt->pts * ctx->bmd_tb_num,
                                      ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (hr != S_OK) {
        av_log(avctx, AV_LOG_ERROR, /* ... */
               " error %08x.\n", (uint32_t) hr);
        /* ... */
    }

    ctx->dlo->GetBufferedVideoFrameCount(&buffered);
    /* ... */
    if (pkt->pts > 2 && buffered <= 2)
        av_log(avctx, AV_LOG_WARNING, /* ... */
               " Video may misbehave!\n");
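    /* Note (added sketch): ScheduleVideoFrame() takes (displayTime, duration, timeScale).
     * Each frame here lasts bmd_tb_num ticks on a bmd_tb_den-per-second clock, so frame N
     * (pts == N) is shown at N * bmd_tb_num. For a 29.97 fps mode, for example, the driver
     * typically reports frameDuration 1001 and timeScale 30000, so frame 100 is scheduled
     * at 100 * 1001 = 100100 ticks on a 30000 Hz clock (about 3.34 s). */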
    /* Once enough frames are queued, end the audio preroll and start scheduled playback */
    if (!ctx->playback_started && pkt->pts > (ctx->first_pts + ctx->frames_preroll)) {
        /* ... */
        if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
            /* ... */
        }
        /* ... */
        if (ctx->dlo->StartScheduledPlayback(ctx->first_pts * ctx->bmd_tb_num,
                                             ctx->bmd_tb_den, 1.0) != S_OK) {
            /* ... */
        }
        ctx->playback_started = 1;
    }
static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    /* ... */
    uint8_t *outbuf = NULL;
    /* ... */

    ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
    if (pkt->pts > 1 && !buffered)
        av_log(avctx, AV_LOG_WARNING, /* ... */
               " Audio will misbehave!\n");

    /* AC-3 is wrapped in an S337 burst (16-bit stereo pair); PCM is passed through */
    /* ... */
        sample_count = outbuf_size / 4;
    /* ... */
        sample_count = pkt->size / (ctx->channels << 1);
    /* ... */

    if (ctx->dlo->ScheduleAudioSamples(outbuf, sample_count, pkt->pts,
                                       bmdAudioSampleRate48kHz, NULL) != S_OK) {
        /* ... */
    }
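    /* Note (added worked example, packet sizes assumed): PCM_S16LE carries 2 bytes per
     * channel per sample, so a 16-channel packet of 15360 bytes gives
     * sample_count = 15360 / (16 << 1) = 480 sample frames, i.e. 10 ms at 48 kHz.
     * For AC-3 the S337 burst occupies a 16-bit stereo pair (4 bytes per sample frame),
     * hence sample_count = outbuf_size / 4. */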
av_cold int ff_decklink_write_header(AVFormatContext *avctx)
{
    /* ... */
    if (klvanc_context_create(&ctx->vanc_ctx) < 0) {
        /* ... */
    }
    ctx->supports_vanc = 1;
    /* ... */

    if (ctx->list_devices) {
        /* ... */
    }
    /* ... */

    if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
        /* ... */
    }
    /* ... */

    if (ctx->list_formats) {
        /* ... */
    }