decklink_dec.cpp
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <atomic>
25 using std::atomic;
26 
27 /* Include internal.h first to avoid conflict between winsock.h (used by
28  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
29 extern "C" {
30 #include "libavformat/internal.h"
31 }
32 
33 #include <DeckLinkAPI.h>
34 
35 extern "C" {
36 #include "config.h"
37 #include "libavformat/avformat.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/avutil.h"
40 #include "libavutil/common.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/intreadwrite.h"
43 #include "libavutil/time.h"
44 #include "libavutil/mathematics.h"
45 #include "libavutil/reverse.h"
46 #include "avdevice.h"
47 #if CONFIG_LIBZVBI
48 #include <libzvbi.h>
49 #endif
50 }
51 
52 #include "decklink_common.h"
53 #include "decklink_dec.h"
54 
55 #define MAX_WIDTH_VANC 1920
56 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
57 
58 typedef struct VANCLineNumber {
59  BMDDisplayMode mode;
60  int vanc_start;
61  int field0_vanc_end;
62  int field1_vanc_start;
63  int vanc_end;
64 } VANCLineNumber;
65 
66 /* These VANC line numbers need not be very accurate. In any case
67  * GetBufferForVerticalBlankingLine() will return an error when an invalid
68  * ancillary line number is requested. We just need to make sure that the
69  * entire VANC region is covered, while making sure we don't decode VANC of
70  * another source during switching*/
71 static VANCLineNumber vanc_line_numbers[] = {
72  /* SD Modes */
73 
74  {bmdModeNTSC, 11, 19, 274, 282},
75  {bmdModeNTSC2398, 11, 19, 274, 282},
76  {bmdModePAL, 7, 22, 320, 335},
77  {bmdModeNTSCp, 11, -1, -1, 39},
78  {bmdModePALp, 7, -1, -1, 45},
79 
80  /* HD 1080 Modes */
81 
82  {bmdModeHD1080p2398, 8, -1, -1, 42},
83  {bmdModeHD1080p24, 8, -1, -1, 42},
84  {bmdModeHD1080p25, 8, -1, -1, 42},
85  {bmdModeHD1080p2997, 8, -1, -1, 42},
86  {bmdModeHD1080p30, 8, -1, -1, 42},
87  {bmdModeHD1080i50, 8, 20, 570, 585},
88  {bmdModeHD1080i5994, 8, 20, 570, 585},
89  {bmdModeHD1080i6000, 8, 20, 570, 585},
90  {bmdModeHD1080p50, 8, -1, -1, 42},
91  {bmdModeHD1080p5994, 8, -1, -1, 42},
92  {bmdModeHD1080p6000, 8, -1, -1, 42},
93 
94  /* HD 720 Modes */
95 
96  {bmdModeHD720p50, 8, -1, -1, 26},
97  {bmdModeHD720p5994, 8, -1, -1, 26},
98  {bmdModeHD720p60, 8, -1, -1, 26},
99 
100  /* For all other modes, for which we don't support VANC */
101  {bmdModeUnknown, 0, -1, -1, -1}
102 };
103 
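// Custom DeckLink memory allocator: buffers come from av_malloc() with
// AV_INPUT_BUFFER_PADDING_SIZE of extra space, so a captured frame can later be
// wrapped directly into a refcounted AVPacket (see av_buffer_create() below)
// without an extra copy.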
104 class decklink_allocator : public IDeckLinkMemoryAllocator
105 {
106 public:
107  decklink_allocator(): _refs(1) { }
108  virtual ~decklink_allocator() { }
109 
110  // IDeckLinkMemoryAllocator methods
111  virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
112  {
113  void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
114  if (!buf)
115  return E_OUTOFMEMORY;
116  *allocatedBuffer = buf;
117  return S_OK;
118  }
119  virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
120  {
121  av_free(buffer);
122  return S_OK;
123  }
124  virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
125  virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
126 
127  // IUnknown methods
128  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
129  virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
130  virtual ULONG STDMETHODCALLTYPE Release(void)
131  {
132  int ret = --_refs;
133  if (!ret)
134  delete this;
135  return ret;
136  }
137 
138 private:
139  std::atomic<int> _refs;
140 };
141 
142 extern "C" {
143 static void decklink_object_free(void *opaque, uint8_t *data)
144 {
145  IUnknown *obj = (class IUnknown *)opaque;
146  obj->Release();
147 }
148 }
149 
150 static int get_vanc_line_idx(BMDDisplayMode mode)
151 {
152  unsigned int i;
153  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
154  if (mode == vanc_line_numbers[i].mode)
155  return i;
156  }
157  /* Return the VANC idx for Unknown mode */
158  return i - 1;
159 }
160 
161 static inline void clear_parity_bits(uint16_t *buf, int len) {
162  int i;
163  for (i = 0; i < len; i++)
164  buf[i] &= 0xff;
165 }
166 
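// SMPTE 291 ancillary words are 10 bits wide: bit 8 is the even-parity bit for
// bits 0-7 and bit 9 is its complement, so the top two bits must be 01 or 10.
// The final word is a checksum: the 9-bit sum of DID, SDID, DC and the user data
// words, with bit 9 again set to the inverse of bit 8.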
167 static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
168  int i;
169  uint16_t vanc_sum = 0;
170  for (i = 3; i < len - 1; i++) {
171  uint16_t v = buf[i];
172  int np = v >> 8;
173  int p = av_parity(v & 0xff);
174  if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
175  // Parity check failed
176  return -1;
177  }
178  vanc_sum += v;
179  }
180  vanc_sum &= 0x1ff;
181  vanc_sum |= ((~vanc_sum & 0x100) << 1);
182  if (checksum != vanc_sum) {
183  // Checksum verification failed
184  return -1;
185  }
186  return 0;
187 }
188 
189 /* The 10-bit VANC data is packed in V210, we only need the luma component. */
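// V210 packs six pixels into four little-endian 32-bit words (16 bytes), each
// word holding three 10-bit components. The luma samples sit at the same bit
// offsets in every 8-byte half group, so the loop below emits three Y values
// per 8 bytes of input.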
190 static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
191 {
192  int i;
193  for (i = 0; i < width / 3; i++) {
194  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
195  *dst++ = src[4] + ((src[5] & 3) << 8);
196  *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
197  src += 8;
198  }
199 }
200 
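// Unlike extract_luma_from_v210(), this keeps every component. It is used below
// for SD/NTSC, where ancillary packets are carried in the full interleaved
// Cb/Y/Cr sample stream rather than in the luma samples only.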
201 static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
202 {
203  int i;
204  for (i = 0; i < width * 2 / 3; i++) {
205  *dst++ = src[0] + ((src[1] & 3) << 8);
206  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
207  *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
208  src += 4;
209  }
210 }
211 
212 static uint8_t calc_parity_and_line_offset(int line)
213 {
214  uint8_t ret = (line < 313) << 5;
215  if (line >= 7 && line <= 22)
216  ret += line;
217  if (line >= 320 && line <= 335)
218  ret += (line - 313);
219  return ret;
220 }
221 
222 static void fill_data_unit_head(int line, uint8_t *tgt)
223 {
224  tgt[0] = 0x02; // data_unit_id
225  tgt[1] = 0x2c; // data_unit_length
226  tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
227  tgt[3] = 0xe4; // framing code
228 }
229 
230 #if CONFIG_LIBZVBI
231 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
232 {
233  vbi_bit_slicer slicer;
234 
235  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
236 
237  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
238  return tgt;
239 
240  fill_data_unit_head(line, tgt);
241 
242  return tgt + 46;
243 }
244 
245 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
246 {
247  uint8_t y[720];
248  uint8_t *py = y;
249  uint8_t *pend = y + 720;
250  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
251  * so we extract the 8 MSBs of the luma component, that is enough for
252  * teletext bit slicing. */
253  while (py < pend) {
254  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
255  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
256  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
257  src += 8;
258  }
259  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
260 }
261 #endif
262 
263 static uint8_t* teletext_data_unit_from_op47_vbi_packet(int line, uint16_t *py, uint8_t *tgt)
264 {
265  int i;
266 
267  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
268  return tgt;
269 
270  fill_data_unit_head(line, tgt);
271 
272  py += 3;
273  tgt += 4;
274 
275  for (i = 0; i < 42; i++)
276  *tgt++ = ff_reverse[py[i] & 255];
277 
278  return tgt;
279 }
280 
281 static int linemask_matches(int line, int64_t mask)
282 {
283  int shift = -1;
284  if (line >= 6 && line <= 22)
285  shift = line - 6;
286  if (line >= 318 && line <= 335)
287  shift = line - 318 + 17;
288  return shift >= 0 && ((1ULL << shift) & mask);
289 }
290 
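// OP-47 Subtitle Distribution Packet payload: a header with two identifier words
// and the WST format code, followed by five line descriptors and up to five
// 45-byte teletext packets, each descriptor giving the field and line number.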
291 static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
292 {
293  if (py < pend - 9) {
294  if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
295  uint16_t *descriptors = py + 4;
296  int i;
297  py += 9;
298  for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
299  int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
300  if (line && linemask_matches(line, wanted_lines))
301  tgt = teletext_data_unit_from_op47_vbi_packet(line, py, tgt);
302  }
303  }
304  }
305  return tgt;
306 }
307 
308 static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
309 {
310  uint16_t did = py[0]; // data id
311  uint16_t sdid = py[1]; // secondary data id
312  uint16_t dc = py[2] & 255; // data count
313  py += 3;
314  pend = FFMIN(pend, py + dc);
315  if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
316  tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
317  } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
318  py += 2; // priority, line/field
319  while (py < pend - 3) {
320  tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
321  py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
322  }
323  }
324  return tgt;
325 }
326 
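// DID 0x61 / SDID 0x01 carries a CEA-708 Caption Distribution Packet. The CDP is
// validated below (0x9669 magic, length, frame-rate code, flags, matching
// header/footer sequence counter and checksum) before the raw cc_data triplets
// are copied out for AV_PKT_DATA_A53_CC side data.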
327 static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
328  unsigned &cc_count)
329 {
330  size_t i, len = (buf[5] & 0xff) + 6 + 1;
331  uint8_t cdp_sum, rate;
332  uint16_t hdr, ftr;
333  uint8_t *cc;
334  uint16_t *cdp = &buf[6]; // CDP follows
335  if (cdp[0] != 0x96 || cdp[1] != 0x69) {
336  av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
337  return NULL;
338  }
339 
340  len -= 7; // remove VANC header and checksum
341 
342  if (cdp[2] != len) {
343  av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
344  return NULL;
345  }
346 
347  cdp_sum = 0;
348  for (i = 0; i < len - 1; i++)
349  cdp_sum += cdp[i];
350  cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
351  if (cdp[len - 1] != cdp_sum) {
352  av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
353  return NULL;
354  }
355 
356  rate = cdp[3];
357  if (!(rate & 0x0f)) {
358  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
359  return NULL;
360  }
361  rate >>= 4;
362  if (rate > 8) {
363  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
364  return NULL;
365  }
366 
367  if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
368  av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
369  return NULL;
370  }
371 
372  hdr = (cdp[5] << 8) | cdp[6];
373  if (cdp[7] != 0x72) /* ccdata_id */ {
374  av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
375  return NULL;
376  }
377 
378  cc_count = cdp[8];
379  if (!(cc_count & 0xe0)) {
380  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
381  return NULL;
382  }
383 
384  cc_count &= 0x1f;
385  if ((len - 13) < cc_count * 3) {
386  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
387  return NULL;
388  }
389 
390  if (cdp[len - 4] != 0x74) /* footer id */ {
391  av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
392  return NULL;
393  }
394 
395  ftr = (cdp[len - 3] << 8) | cdp[len - 2];
396  if (ftr != hdr) {
397  av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
398  return NULL;
399  }
400 
401  cc = (uint8_t *)av_malloc(cc_count * 3);
402  if (cc == NULL) {
403  av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
404  return NULL;
405  }
406 
407  for (size_t i = 0; i < cc_count; i++) {
408  cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
409  cc[3*i + 1] = cdp[9 + 3*i+1];
410  cc[3*i + 2] = cdp[9 + 3*i+2];
411  }
412 
413  cc_count *= 3;
414  return cc;
415 }
416 
417 static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
418  uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
419 {
420  decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
421  uint16_t *max_buf = buf + width;
422 
423  while (buf < max_buf - 6) {
424  int len;
425  uint16_t did = buf[3] & 0xFF; // data id
426  uint16_t sdid = buf[4] & 0xFF; // secondary data id
427  /* Check for VANC header */
428  if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
429  return tgt;
430  }
431 
432  len = (buf[5] & 0xff) + 6 + 1;
433  if (len > max_buf - buf) {
434  av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
435  len, max_buf - buf);
436  return tgt;
437  }
438 
439  if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
440  width == 1920 && tgt_size >= 1920) {
441  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
442  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
443  goto skip_packet;
444  }
445  tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
446  } else if (did == 0x61 && sdid == 0x01) {
447  unsigned int data_len;
448  uint8_t *data;
449  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
450  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
451  goto skip_packet;
452  }
453  clear_parity_bits(buf, len);
454  data = vanc_to_cc(avctx, buf, width, data_len);
455  if (data) {
456  if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
457  av_free(data);
458  }
459  } else {
460  av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
461  did, sdid);
462  }
463 skip_packet:
464  buf += len;
465  }
466 
467  return tgt;
468 }
469 
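// Minimal mutex/condition protected packet FIFO: the DeckLink capture thread
// pushes packets from its callback, ff_decklink_read_packet() pops them
// (blocking), and max_q_size bounds the total buffered size.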
470 static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
471 {
472  struct decklink_cctx *ctx = (struct decklink_cctx *)avctx->priv_data;
473  memset(q, 0, sizeof(AVPacketQueue));
474  pthread_mutex_init(&q->mutex, NULL);
475  pthread_cond_init(&q->cond, NULL);
476  q->avctx = avctx;
477  q->max_q_size = ctx->queue_size;
478 }
479 
480 static void avpacket_queue_flush(AVPacketQueue *q)
481 {
482  AVPacketList *pkt, *pkt1;
483 
484  pthread_mutex_lock(&q->mutex);
485  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
486  pkt1 = pkt->next;
487  av_packet_unref(&pkt->pkt);
488  av_freep(&pkt);
489  }
490  q->last_pkt = NULL;
491  q->first_pkt = NULL;
492  q->nb_packets = 0;
493  q->size = 0;
494  pthread_mutex_unlock(&q->mutex);
495 }
496 
497 static void avpacket_queue_end(AVPacketQueue *q)
498 {
499  avpacket_queue_flush(q);
500  pthread_mutex_destroy(&q->mutex);
501  pthread_cond_destroy(&q->cond);
502 }
503 
504 static unsigned long long avpacket_queue_size(AVPacketQueue *q)
505 {
506  unsigned long long size;
507  pthread_mutex_lock(&q->mutex);
508  size = q->size;
509  pthread_mutex_unlock(&q->mutex);
510  return size;
511 }
512 
513 static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
514 {
515  AVPacketList *pkt1;
516 
517  // Drop Packet if queue size is > maximum queue size
518  if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
519  av_packet_unref(pkt);
520  av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
521  return -1;
522  }
523  /* ensure the packet is reference counted */
524  if (av_packet_make_refcounted(pkt) < 0) {
525  av_packet_unref(pkt);
526  return -1;
527  }
528 
529  pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
530  if (!pkt1) {
531  av_packet_unref(pkt);
532  return -1;
533  }
534  av_packet_move_ref(&pkt1->pkt, pkt);
535  pkt1->next = NULL;
536 
537  pthread_mutex_lock(&q->mutex);
538 
539  if (!q->last_pkt) {
540  q->first_pkt = pkt1;
541  } else {
542  q->last_pkt->next = pkt1;
543  }
544 
545  q->last_pkt = pkt1;
546  q->nb_packets++;
547  q->size += pkt1->pkt.size + sizeof(*pkt1);
548 
549  pthread_cond_signal(&q->cond);
550 
551  pthread_mutex_unlock(&q->mutex);
552  return 0;
553 }
554 
555 static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
556 {
557  AVPacketList *pkt1;
558  int ret;
559 
560  pthread_mutex_lock(&q->mutex);
561 
562  for (;; ) {
563  pkt1 = q->first_pkt;
564  if (pkt1) {
565  q->first_pkt = pkt1->next;
566  if (!q->first_pkt) {
567  q->last_pkt = NULL;
568  }
569  q->nb_packets--;
570  q->size -= pkt1->pkt.size + sizeof(*pkt1);
571  *pkt = pkt1->pkt;
572  av_free(pkt1);
573  ret = 1;
574  break;
575  } else if (!block) {
576  ret = 0;
577  break;
578  } else {
579  pthread_cond_wait(&q->cond, &q->mutex);
580  }
581  }
582  pthread_mutex_unlock(&q->mutex);
583  return ret;
584 }
585 
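// Input callback invoked on DeckLink's capture thread: each video frame and
// audio packet is wrapped into an AVPacket and pushed onto the queue above.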
586 class decklink_input_callback : public IDeckLinkInputCallback
587 {
588 public:
589  decklink_input_callback(AVFormatContext *_avctx);
590  ~decklink_input_callback();
591 
592  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
593  virtual ULONG STDMETHODCALLTYPE AddRef(void);
594  virtual ULONG STDMETHODCALLTYPE Release(void);
595  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
596  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
597 
598 private:
599  std::atomic<int> _refs;
600  AVFormatContext *avctx;
601  decklink_ctx *ctx;
602  int no_video;
603  int64_t initial_video_pts;
604  int64_t initial_audio_pts;
605 };
606 
607 decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : _refs(1)
608 {
609  avctx = _avctx;
610  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
611  ctx = (struct decklink_ctx *)cctx->ctx;
612  no_video = 0;
613  initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
614 }
615 
616 decklink_input_callback::~decklink_input_callback()
617 {
618 }
619 
620 ULONG decklink_input_callback::AddRef(void)
621 {
622  return ++_refs;
623 }
624 
625 ULONG decklink_input_callback::Release(void)
626 {
627  int ret = --_refs;
628  if (!ret)
629  delete this;
630  return ret;
631 }
632 
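// Derive a packet timestamp from the selected source: the audio packet time,
// the video stream time, the hardware reference clock, or the (absolute)
// wallclock. Unless copyts is set, the first timestamp seen becomes the zero
// point and is subtracted from everything that follows.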
633 static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
634  IDeckLinkAudioInputPacket *audioFrame,
635  int64_t wallclock,
636  int64_t abs_wallclock,
637  DecklinkPtsSource pts_src,
638  AVRational time_base, int64_t *initial_pts,
639  int copyts)
640 {
641  int64_t pts = AV_NOPTS_VALUE;
642  BMDTimeValue bmd_pts;
643  BMDTimeValue bmd_duration;
644  HRESULT res = E_INVALIDARG;
645  switch (pts_src) {
646  case PTS_SRC_AUDIO:
647  if (audioFrame)
648  res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
649  break;
650  case PTS_SRC_VIDEO:
651  if (videoFrame)
652  res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
653  break;
654  case PTS_SRC_REFERENCE:
655  if (videoFrame)
656  res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
657  break;
658  case PTS_SRC_WALLCLOCK:
659  /* fall through */
660  case PTS_SRC_ABS_WALLCLOCK:
661  {
662  /* MSVC does not support compound literals like AV_TIME_BASE_Q
663  * in C++ code (compiler error C4576) */
664  AVRational timebase;
665  timebase.num = 1;
666  timebase.den = AV_TIME_BASE;
667  if (pts_src == PTS_SRC_WALLCLOCK)
668  pts = av_rescale_q(wallclock, timebase, time_base);
669  else
670  pts = av_rescale_q(abs_wallclock, timebase, time_base);
671  break;
672  }
673  }
674  if (res == S_OK)
675  pts = bmd_pts / time_base.num;
676 
677  if (!copyts) {
678  if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
679  *initial_pts = pts;
680  if (*initial_pts != AV_NOPTS_VALUE)
681  pts -= *initial_pts;
682  }
683 
684  return pts;
685 }
686 
687 HRESULT decklink_input_callback::VideoInputFrameArrived(
688  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
689 {
690  void *frameBytes;
691  void *audioFrameBytes;
692  BMDTimeValue frameTime;
693  BMDTimeValue frameDuration;
694  int64_t wallclock = 0, abs_wallclock = 0;
695  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
696 
697  if (ctx->autodetect) {
698  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
699  ctx->bmd_mode == bmdModeUnknown)
700  {
701  ctx->bmd_mode = AUTODETECT_DEFAULT_MODE;
702  }
703  return S_OK;
704  }
705 
706  // Drop frames until the system timestamp aligns with the configured timestamp_align interval.
707  if (0 == ctx->frameCount && cctx->timestamp_align) {
708  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
709  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
710  if (av_cmp_q(remainder, frame_duration) > 0) {
711  ++ctx->dropped;
712  return S_OK;
713  }
714  }
715 
716  ctx->frameCount++;
717  if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
718  wallclock = av_gettime_relative();
719  if (ctx->audio_pts_source == PTS_SRC_ABS_WALLCLOCK || ctx->video_pts_source == PTS_SRC_ABS_WALLCLOCK)
720  abs_wallclock = av_gettime();
721 
722  // Handle Video Frame
723  if (videoFrame) {
724  AVPacket pkt;
725  av_init_packet(&pkt);
726  if (ctx->frameCount % 25 == 0) {
727  unsigned long long qsize = avpacket_queue_size(&ctx->queue);
728  av_log(avctx, AV_LOG_DEBUG,
729  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
730  ctx->frameCount,
731  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
732  (double)qsize / 1024 / 1024);
733  }
734 
735  videoFrame->GetBytes(&frameBytes);
736  videoFrame->GetStreamTime(&frameTime, &frameDuration,
737  ctx->video_st->time_base.den);
738 
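// No-signal handling: when the input is gone, optionally paint UYVY colour bars
// into the frame (draw_bars) and log the dropout on the transition; otherwise
// attach the frame's timecode, if requested, as packet side data.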
739  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
740  if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
741  unsigned bars[8] = {
742  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
743  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
744  int width = videoFrame->GetWidth();
745  int height = videoFrame->GetHeight();
746  unsigned *p = (unsigned *)frameBytes;
747 
748  for (int y = 0; y < height; y++) {
749  for (int x = 0; x < width; x += 2)
750  *p++ = bars[(x * 8) / width];
751  }
752  }
753 
754  if (!no_video) {
755  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
756  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
757  }
758  no_video = 1;
759  } else {
760  if (no_video) {
761  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
762  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
763  }
764  no_video = 0;
765 
766  // Handle Timecode (if requested)
767  if (ctx->tc_format) {
768  IDeckLinkTimecode *timecode;
769  if (videoFrame->GetTimecode(ctx->tc_format, &timecode) == S_OK) {
770  const char *tc = NULL;
771  DECKLINK_STR decklink_tc;
772  if (timecode->GetString(&decklink_tc) == S_OK) {
773  tc = DECKLINK_STRDUP(decklink_tc);
774  DECKLINK_FREE(decklink_tc);
775  }
776  timecode->Release();
777  if (tc) {
778  AVDictionary* metadata_dict = NULL;
779  int metadata_len;
780  uint8_t* packed_metadata;
781  if (av_dict_set(&metadata_dict, "timecode", tc, AV_DICT_DONT_STRDUP_VAL) >= 0) {
782  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
783  av_dict_free(&metadata_dict);
784  if (packed_metadata) {
785  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
786  av_freep(&packed_metadata);
787  else if (!ctx->tc_seen)
788  ctx->tc_seen = 1;
789  }
790  }
791  }
792  } else {
793  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
794  }
795  }
796  }
797 
798  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
799 
800  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
801  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
802  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
803  return S_OK;
804  }
805 
806  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
807  pkt.dts = pkt.pts;
808 
809  pkt.duration = frameDuration;
810  // TODO: verify that this still applies
811  pkt.flags |= AV_PKT_FLAG_KEY;
812  pkt.stream_index = ctx->video_st->index;
813  pkt.data = (uint8_t *)frameBytes;
814  pkt.size = videoFrame->GetRowBytes() *
815  videoFrame->GetHeight();
816  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
817 
818  if (!no_video) {
819  IDeckLinkVideoFrameAncillary *vanc;
820  AVPacket txt_pkt;
821  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
822  uint8_t *txt_buf = txt_buf0;
823 
824  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
825  int i;
826  int64_t line_mask = 1;
827  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
828  txt_buf[0] = 0x10; // data_identifier - EBU_data
829  txt_buf++;
830 #if CONFIG_LIBZVBI
831  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
832  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
833  av_assert0(videoFrame->GetWidth() == 720);
834  for (i = 6; i < 336; i++, line_mask <<= 1) {
835  uint8_t *buf;
836  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
837  if (vanc_format == bmdFormat8BitYUV)
838  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
839  else
840  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
841  }
842  if (i == 22)
843  i = 317;
844  }
845  }
846 #endif
847  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
848  int idx = get_vanc_line_idx(ctx->bmd_mode);
849  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
850  uint8_t *buf;
851  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
852  uint16_t vanc[MAX_WIDTH_VANC];
853  size_t vanc_size = videoFrame->GetWidth();
854  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
855  vanc_size = vanc_size * 2;
856  unpack_v210(vanc, buf, videoFrame->GetWidth());
857  } else {
858  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
859  }
860  txt_buf = get_metadata(avctx, vanc, vanc_size,
861  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
862  }
863  if (i == vanc_line_numbers[idx].field0_vanc_end)
864  i = vanc_line_numbers[idx].field1_vanc_start - 1;
865  }
866  }
867  vanc->Release();
868  if (txt_buf - txt_buf0 > 1) {
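// Pad with 0xff stuffing data units so that, together with the 45-byte PES
// header, the teletext payload fills a whole number of 184-byte TS payloads,
// as required for DVB teletext streams.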
869  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
870  while (stuffing_units--) {
871  memset(txt_buf, 0xff, 46);
872  txt_buf[1] = 0x2c; // data_unit_length
873  txt_buf += 46;
874  }
875  av_init_packet(&txt_pkt);
876  txt_pkt.pts = pkt.pts;
877  txt_pkt.dts = pkt.dts;
878  txt_pkt.stream_index = ctx->teletext_st->index;
879  txt_pkt.data = txt_buf0;
880  txt_pkt.size = txt_buf - txt_buf0;
881  if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
882  ++ctx->dropped;
883  }
884  }
885  }
886  }
887 
888  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
889  if (pkt.buf)
890  videoFrame->AddRef();
891 
892  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
893  ++ctx->dropped;
894  }
895  }
896 
897  // Handle Audio Frame
898  if (audioFrame) {
899  AVPacket pkt;
900  BMDTimeValue audio_pts;
901  av_init_packet(&pkt);
902 
903  //hack among hacks
904  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
905  audioFrame->GetBytes(&audioFrameBytes);
906  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
907  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
908  pkt.dts = pkt.pts;
909 
910  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
911  pkt.flags |= AV_PKT_FLAG_KEY;
912  pkt.stream_index = ctx->audio_st->index;
913  pkt.data = (uint8_t *)audioFrameBytes;
914 
915  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
916  ++ctx->dropped;
917  }
918  }
919 
920  return S_OK;
921 }
922 
923 HRESULT decklink_input_callback::VideoInputFormatChanged(
924  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
925  BMDDetectedVideoInputFormatFlags)
926 {
927  ctx->bmd_mode = mode->GetDisplayMode();
928  return S_OK;
929 }
930 
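// Mode autodetection: enable the input in a default mode with format detection
// turned on, start the streams, and poll for up to one second until
// VideoInputFormatChanged() reports the real mode, which is then written back
// to cctx->format_code as a FourCC.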
931 static int decklink_autodetect(struct decklink_cctx *cctx) {
932  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
933  DECKLINK_BOOL autodetect_supported = false;
934  int i;
935 
936  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
937  return -1;
938  if (autodetect_supported == false)
939  return -1;
940 
941  ctx->autodetect = 1;
942  ctx->bmd_mode = bmdModeUnknown;
943  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
944  bmdFormat8BitYUV,
945  bmdVideoInputEnableFormatDetection) != S_OK) {
946  return -1;
947  }
948 
949  if (ctx->dli->StartStreams() != S_OK) {
950  return -1;
951  }
952 
953  // 1 second timeout
954  for (i = 0; i < 10; i++) {
955  av_usleep(100000);
956  /* Sometimes VideoInputFrameArrived is called without the
957  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
958  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
959  if (ctx->bmd_mode != bmdModeUnknown &&
960  ctx->bmd_mode != AUTODETECT_DEFAULT_MODE)
961  break;
962  }
963 
964  ctx->dli->PauseStreams();
965  ctx->dli->FlushStreams();
966  ctx->autodetect = 0;
967  if (ctx->bmd_mode != bmdModeUnknown) {
968  cctx->format_code = (char *)av_mallocz(5);
969  if (!cctx->format_code)
970  return -1;
971  AV_WB32(cctx->format_code, ctx->bmd_mode);
972  return 0;
973  } else {
974  return -1;
975  }
976 
977 }
978 
979 extern "C" {
980 
981 av_cold int ff_decklink_read_close(AVFormatContext *avctx)
982 {
983  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
984  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
985 
986  if (ctx->dli) {
987  ctx->dli->StopStreams();
988  ctx->dli->DisableVideoInput();
989  ctx->dli->DisableAudioInput();
990  }
991 
992  ff_decklink_cleanup(avctx);
993  avpacket_queue_end(&ctx->queue);
994 
995  av_freep(&cctx->ctx);
996 
997  return 0;
998 }
999 
1000 av_cold int ff_decklink_read_header(AVFormatContext *avctx)
1001 {
1002  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1003  struct decklink_ctx *ctx;
1004  class decklink_allocator *allocator;
1005  class decklink_input_callback *input_callback;
1006  AVStream *st;
1007  HRESULT result;
1008  int ret;
1009 
1010  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1011  if (!ctx)
1012  return AVERROR(ENOMEM);
1013  ctx->list_devices = cctx->list_devices;
1014  ctx->list_formats = cctx->list_formats;
1015  ctx->teletext_lines = cctx->teletext_lines;
1016  ctx->preroll = cctx->preroll;
1017  ctx->duplex_mode = cctx->duplex_mode;
1018  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1019  ctx->tc_format = decklink_timecode_format_map[cctx->tc_format];
1020  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1021  ctx->video_input = decklink_video_connection_map[cctx->video_input];
1022  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1023  ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
1024  ctx->audio_pts_source = cctx->audio_pts_source;
1025  ctx->video_pts_source = cctx->video_pts_source;
1026  ctx->draw_bars = cctx->draw_bars;
1027  ctx->audio_depth = cctx->audio_depth;
1028  cctx->ctx = ctx;
1029 
1030  /* Check audio channel option for valid values: 2, 8 or 16 */
1031  switch (cctx->audio_channels) {
1032  case 2:
1033  case 8:
1034  case 16:
1035  break;
1036  default:
1037  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1038  return AVERROR(EINVAL);
1039  }
1040 
1041  /* Check audio bit depth option for valid values: 16 or 32 */
1042  switch (cctx->audio_depth) {
1043  case 16:
1044  case 32:
1045  break;
1046  default:
1047  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1048  return AVERROR(EINVAL);
1049  }
1050 
1051  /* List available devices. */
1052  if (ctx->list_devices) {
1053  av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. Please use ffmpeg -sources decklink instead.\n");
1054  ff_decklink_list_devices_legacy(avctx, 1, 0);
1055  return AVERROR_EXIT;
1056  }
1057 
1058  ret = ff_decklink_init_device(avctx, avctx->url);
1059  if (ret < 0)
1060  return ret;
1061 
1062  /* Get input device. */
1063  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1064  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1065  avctx->url);
1066  ret = AVERROR(EIO);
1067  goto error;
1068  }
1069 
1070  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1071  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1072  ret = AVERROR(EIO);
1073  goto error;
1074  }
1075 
1076  /* List supported formats. */
1077  if (ctx->list_formats) {
1078  ff_decklink_list_formats(avctx, DIRECTION_IN);
1079  ret = AVERROR_EXIT;
1080  goto error;
1081  }
1082 
1083  input_callback = new decklink_input_callback(avctx);
1084  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1085  input_callback->Release();
1086  if (ret < 0) {
1087  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1088  goto error;
1089  }
1090 
1091  allocator = new decklink_allocator();
1092  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1093  allocator->Release();
1094  if (ret < 0) {
1095  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1096  goto error;
1097  }
1098 
1099  if (!cctx->format_code) {
1100  if (decklink_autodetect(cctx) < 0) {
1101  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1102  ret = AVERROR(EIO);
1103  goto error;
1104  }
1105  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1106  }
1107  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1108  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1109  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1110  ret = AVERROR(EIO);
1111  goto error;
1112  }
1113 
1114 #if !CONFIG_LIBZVBI
1115  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1116  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1117  ret = AVERROR(ENOSYS);
1118  goto error;
1119  }
1120 #endif
1121 
1122  /* Setup streams. */
1123  st = avformat_new_stream(avctx, NULL);
1124  if (!st) {
1125  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1126  ret = AVERROR(ENOMEM);
1127  goto error;
1128  }
1129  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1130  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1131  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1132  st->codecpar->channels = cctx->audio_channels;
1133  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1134  ctx->audio_st=st;
1135 
1136  st = avformat_new_stream(avctx, NULL);
1137  if (!st) {
1138  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1139  ret = AVERROR(ENOMEM);
1140  goto error;
1141  }
1142  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1143  st->codecpar->width = ctx->bmd_width;
1144  st->codecpar->height = ctx->bmd_height;
1145 
1146  st->time_base.den = ctx->bmd_tb_den;
1147  st->time_base.num = ctx->bmd_tb_num;
1148  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1149 
1150  switch((BMDPixelFormat)cctx->raw_format) {
1151  case bmdFormat8BitYUV:
1152  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1153  st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
1154  st->codecpar->format = AV_PIX_FMT_UYVY422;
1155  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1156  break;
1157  case bmdFormat10BitYUV:
1158  st->codecpar->codec_id = AV_CODEC_ID_V210;
1159  st->codecpar->codec_tag = MKTAG('V','2','1','0');
1160  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1161  st->codecpar->bits_per_coded_sample = 10;
1162  break;
1163  case bmdFormat8BitARGB:
1164  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1165  st->codecpar->format = AV_PIX_FMT_0RGB;
1166  st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
1167  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1168  break;
1169  case bmdFormat8BitBGRA:
1170  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1171  st->codecpar->format = AV_PIX_FMT_BGR0;
1172  st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
1173  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1174  break;
1175  case bmdFormat10BitRGB:
1176  st->codecpar->codec_id = AV_CODEC_ID_R210;
1177  st->codecpar->codec_tag = MKTAG('R','2','1','0');
1178  st->codecpar->format = AV_PIX_FMT_RGB48LE;
1179  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1180  st->codecpar->bits_per_coded_sample = 10;
1181  break;
1182  default:
1183  av_log(avctx, AV_LOG_ERROR, "Raw Format %.4s not supported\n", (char*) &cctx->raw_format);
1184  ret = AVERROR(EINVAL);
1185  goto error;
1186  }
1187 
1188  switch (ctx->bmd_field_dominance) {
1189  case bmdUpperFieldFirst:
1190  st->codecpar->field_order = AV_FIELD_TT;
1191  break;
1192  case bmdLowerFieldFirst:
1193  st->codecpar->field_order = AV_FIELD_BB;
1194  break;
1195  case bmdProgressiveFrame:
1196  case bmdProgressiveSegmentedFrame:
1197  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1198  break;
1199  }
1200 
1201  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1202 
1203  ctx->video_st=st;
1204 
1205  if (ctx->teletext_lines) {
1206  st = avformat_new_stream(avctx, NULL);
1207  if (!st) {
1208  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1209  ret = AVERROR(ENOMEM);
1210  goto error;
1211  }
1212  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1213  st->time_base.den = ctx->bmd_tb_den;
1214  st->time_base.num = ctx->bmd_tb_num;
1215  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1216  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1217  ctx->teletext_st = st;
1218  }
1219 
1220  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
1221  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1222 
1223  if (result != S_OK) {
1224  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1225  ret = AVERROR(EIO);
1226  goto error;
1227  }
1228 
1229  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1230  (BMDPixelFormat) cctx->raw_format,
1231  bmdVideoInputFlagDefault);
1232 
1233  if (result != S_OK) {
1234  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1235  ret = AVERROR(EIO);
1236  goto error;
1237  }
1238 
1239  avpacket_queue_init (avctx, &ctx->queue);
1240 
1241  if (ctx->dli->StartStreams() != S_OK) {
1242  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1243  ret = AVERROR(EIO);
1244  goto error;
1245  }
1246 
1247  return 0;
1248 
1249 error:
1250  ff_decklink_cleanup(avctx);
1251  return ret;
1252 }
1253 
1254 int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
1255 {
1256  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1257  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1258 
1259  avpacket_queue_get(&ctx->queue, pkt, 1);
1260 
1261  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
1262  int size;
1263  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
1264  if (side_metadata) {
1265  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1266  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1267  }
1268  }
1269 
1270  return 0;
1271 }
1272 
1273 int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
1274 {
1275  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1276 }
1277 
1278 } /* extern "C" */