FFmpeg
decklink_dec.cpp
Go to the documentation of this file.
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <atomic>
25 #include <vector>
26 using std::atomic;
27 
28 /* Include internal.h first to avoid conflict between winsock.h (used by
29  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
30 extern "C" {
31 #include "libavformat/internal.h"
32 }
33 
34 #include <DeckLinkAPI.h>
35 
36 extern "C" {
37 #include "config.h"
38 #include "libavformat/avformat.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/avutil.h"
41 #include "libavutil/common.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/intreadwrite.h"
44 #include "libavutil/time.h"
45 #include "libavutil/mathematics.h"
46 #include "libavutil/reverse.h"
47 #include "avdevice.h"
48 #if CONFIG_LIBZVBI
49 #include <libzvbi.h>
50 #endif
51 }
52 
53 #include "decklink_common.h"
54 #include "decklink_dec.h"
55 
56 #define MAX_WIDTH_VANC 1920
57 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
58 
59 typedef struct VANCLineNumber {
60  BMDDisplayMode mode;
64  int vanc_end;
66 
67 /* These VANC line numbers need not be very accurate. In any case
68  * GetBufferForVerticalBlankingLine() will return an error when invalid
69  * ancillary line number was requested. We just need to make sure that the
70  * entire VANC region is covered, while making sure we don't decode VANC of
71  * another source during switching*/
73  /* SD Modes */
74 
75  {bmdModeNTSC, 11, 19, 274, 282},
76  {bmdModeNTSC2398, 11, 19, 274, 282},
77  {bmdModePAL, 7, 22, 320, 335},
78  {bmdModeNTSCp, 11, -1, -1, 39},
79  {bmdModePALp, 7, -1, -1, 45},
80 
81  /* HD 1080 Modes */
82 
83  {bmdModeHD1080p2398, 8, -1, -1, 42},
84  {bmdModeHD1080p24, 8, -1, -1, 42},
85  {bmdModeHD1080p25, 8, -1, -1, 42},
86  {bmdModeHD1080p2997, 8, -1, -1, 42},
87  {bmdModeHD1080p30, 8, -1, -1, 42},
88  {bmdModeHD1080i50, 8, 20, 570, 585},
89  {bmdModeHD1080i5994, 8, 20, 570, 585},
90  {bmdModeHD1080i6000, 8, 20, 570, 585},
91  {bmdModeHD1080p50, 8, -1, -1, 42},
92  {bmdModeHD1080p5994, 8, -1, -1, 42},
93  {bmdModeHD1080p6000, 8, -1, -1, 42},
94 
95  /* HD 720 Modes */
96 
97  {bmdModeHD720p50, 8, -1, -1, 26},
98  {bmdModeHD720p5994, 8, -1, -1, 26},
99  {bmdModeHD720p60, 8, -1, -1, 26},
100 
101  /* For all other modes, for which we don't support VANC */
102  {bmdModeUnknown, 0, -1, -1, -1}
103 };
104 
105 class decklink_allocator : public IDeckLinkMemoryAllocator
106 {
107 public:
108  decklink_allocator(): _refs(1) { }
109  virtual ~decklink_allocator() { }
110 
111  // IDeckLinkMemoryAllocator methods
112  virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
113  {
114  void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
115  if (!buf)
116  return E_OUTOFMEMORY;
117  *allocatedBuffer = buf;
118  return S_OK;
119  }
120  virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
121  {
122  av_free(buffer);
123  return S_OK;
124  }
125  virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
126  virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
127 
128  // IUnknown methods
129  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
130  virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
131  virtual ULONG STDMETHODCALLTYPE Release(void)
132  {
133  int ret = --_refs;
134  if (!ret)
135  delete this;
136  return ret;
137  }
138 
139 private:
140  std::atomic<int> _refs;
141 };
142 
143 extern "C" {
144 static void decklink_object_free(void *opaque, uint8_t *data)
145 {
146  IUnknown *obj = (class IUnknown *)opaque;
147  obj->Release();
148 }
149 }
150 
151 static int get_vanc_line_idx(BMDDisplayMode mode)
152 {
153  unsigned int i;
154  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
155  if (mode == vanc_line_numbers[i].mode)
156  return i;
157  }
158  /* Return the VANC idx for Unknown mode */
159  return i - 1;
160 }
161 
/* Strip bits 8-9 (parity/marker bits of 10-bit VANC words) in place, keeping
 * only the low 8 payload bits of each of the first len words. */
static inline void clear_parity_bits(uint16_t *buf, int len) {
 for (uint16_t *p = buf; p < buf + len; p++)
 *p &= 0xff;
}
167 
168 static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
169  int i;
170  uint16_t vanc_sum = 0;
171  for (i = 3; i < len - 1; i++) {
172  uint16_t v = buf[i];
173  int np = v >> 8;
174  int p = av_parity(v & 0xff);
175  if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
176  // Parity check failed
177  return -1;
178  }
179  vanc_sum += v;
180  }
181  vanc_sum &= 0x1ff;
182  vanc_sum |= ((~vanc_sum & 0x100) << 1);
183  if (checksum != vanc_sum) {
184  // Checksum verification failed
185  return -1;
186  }
187  return 0;
188 }
189 
/* The 10-bit VANC data is packed in V210; extract only the luma samples.
 * Each 8-byte group (two little-endian 32-bit words) yields 3 luma values:
 * bits 10-19 of word 0, bits 0-9 and 20-29 of word 1. */
static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
{
 for (int n = width / 3; n > 0; n--, src += 8, dst += 3) {
 dst[0] = (src[1] >> 2) | ((src[2] & 0x0f) << 6);
 dst[1] = src[4] | ((src[5] & 0x03) << 8);
 dst[2] = (src[6] >> 4) | ((src[7] & 0x3f) << 4);
 }
}
201 
/* Unpack every component (luma and chroma) of V210-packed data: each
 * little-endian 32-bit word expands to three 10-bit values. */
static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
{
 int words = width * 2 / 3;
 while (words-- > 0) {
 *dst++ = src[0] | ((src[1] & 0x03) << 8);
 *dst++ = (src[1] >> 2) | ((src[2] & 0x0f) << 6);
 *dst++ = (src[2] >> 4) | ((src[3] & 0x3f) << 4);
 src += 4;
 }
}
212 
214 {
215  uint8_t ret = (line < 313) << 5;
216  if (line >= 7 && line <= 22)
217  ret += line;
218  if (line >= 320 && line <= 335)
219  ret += (line - 313);
220  return ret;
221 }
222 
223 static void fill_data_unit_head(int line, uint8_t *tgt)
224 {
225  tgt[0] = 0x02; // data_unit_id
226  tgt[1] = 0x2c; // data_unit_length
227  tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
228  tgt[3] = 0xe4; // framing code
229 }
230 
231 #if CONFIG_LIBZVBI
232 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
233 {
234  vbi_bit_slicer slicer;
235 
236  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
237 
238  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
239  return tgt;
240 
242 
243  return tgt + 46;
244 }
245 
246 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
247 {
248  uint8_t y[720];
249  uint8_t *py = y;
250  uint8_t *pend = y + 720;
251  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
252  * so we extract the 8 MSBs of the luma component, that is enough for
253  * teletext bit slicing. */
254  while (py < pend) {
255  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
256  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
257  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
258  src += 8;
259  }
260  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
261 }
262 #endif
263 
265 {
266  int i;
267 
268  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
269  return tgt;
270 
271  fill_data_unit_head(line, tgt);
272 
273  py += 3;
274  tgt += 4;
275 
276  for (i = 0; i < 42; i++)
277  *tgt++ = ff_reverse[py[i] & 255];
278 
279  return tgt;
280 }
281 
/* Test whether a PAL teletext line is selected in the user-supplied line
 * mask: bits 0-16 cover lines 6-22 (field 1), bits 17-34 cover lines
 * 318-335 (field 2). Returns nonzero when the line is wanted. */
static int linemask_matches(int line, int64_t mask)
{
 int shift;
 if (line >= 6 && line <= 22)
 shift = line - 6;
 else if (line >= 318 && line <= 335)
 shift = line - 318 + 17;
 else
 return 0;
 return ((1ULL << shift) & mask) != 0;
}
291 
292 static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
293 {
294  if (py < pend - 9) {
295  if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
296  uint16_t *descriptors = py + 4;
297  int i;
298  py += 9;
299  for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
300  int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
301  if (line && linemask_matches(line, wanted_lines))
302  tgt = teletext_data_unit_from_op47_vbi_packet(line, py, tgt);
303  }
304  }
305  }
306  return tgt;
307 }
308 
309 static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
310 {
311  uint16_t did = py[0]; // data id
312  uint16_t sdid = py[1]; // secondary data id
313  uint16_t dc = py[2] & 255; // data count
314  py += 3;
315  pend = FFMIN(pend, py + dc);
316  if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
317  tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
318  } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
319  py += 2; // priority, line/field
320  while (py < pend - 3) {
321  tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
322  py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
323  }
324  }
325  return tgt;
326 }
327 
/**
 * Parse a Caption Distribution Packet (CDP, SMPTE 334-2 style) carried in a
 * VANC packet and extract the raw CEA-708 cc_data triplets.
 *
 * @param avctx    log context
 * @param buf      VANC packet starting at its 6-word header (parity bits
 *                 already cleared); the CDP begins at buf[6]
 * @param words    total number of words available in buf
 *                 (NOTE(review): not used for bounds checking here — the
 *                 caller is presumed to have validated the packet length)
 * @param cc_count out: on success, the number of extracted BYTES (triplets*3)
 * @return newly av_malloc()ed cc_data bytes (caller owns/frees), or NULL on
 *         any validation failure (a warning is logged)
 */
static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
 unsigned &cc_count)
{
 /* VANC data count (buf[5]) + 6 header words + 1 checksum word */
 size_t i, len = (buf[5] & 0xff) + 6 + 1;
 uint8_t cdp_sum, rate;
 uint16_t hdr, ftr;
 uint8_t *cc;
 uint16_t *cdp = &buf[6]; // CDP follows
 /* cdp_identifier must be 0x9669 */
 if (cdp[0] != 0x96 || cdp[1] != 0x69) {
 av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
 return NULL;
 }

 len -= 7; // remove VANC header and checksum

 /* cdp_length must match the remaining payload size */
 if (cdp[2] != len) {
 av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
 return NULL;
 }

 /* The CDP checksum byte makes the 8-bit sum of the whole CDP zero. */
 cdp_sum = 0;
 for (i = 0; i < len - 1; i++)
 cdp_sum += cdp[i];
 cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
 if (cdp[len - 1] != cdp_sum) {
 av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
 return NULL;
 }

 /* cdp_frame_rate is the high nibble; low nibble is reserved (must be set) */
 rate = cdp[3];
 if (!(rate & 0x0f)) {
 av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
 return NULL;
 }
 rate >>= 4;
 if (rate > 8) {
 av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
 return NULL;
 }

 if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
 av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
 return NULL;
 }

 /* cdp_hdr_sequence_cntr; must match the footer sequence counter below */
 hdr = (cdp[5] << 8) | cdp[6];
 if (cdp[7] != 0x72) /* ccdata_id */ {
 av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
 return NULL;
 }

 /* cc_count byte: top three marker bits must be set, low 5 bits = count */
 cc_count = cdp[8];
 if (!(cc_count & 0xe0)) {
 av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
 return NULL;
 }

 cc_count &= 0x1f;
 /* 13 = CDP bytes before cc_data plus the footer; each entry is 3 bytes */
 if ((len - 13) < cc_count * 3) {
 av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
 return NULL;
 }

 if (cdp[len - 4] != 0x74) /* footer id */ {
 av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
 return NULL;
 }

 /* cdp_ftr_sequence_cntr must equal the header sequence counter */
 ftr = (cdp[len - 3] << 8) | cdp[len - 2];
 if (ftr != hdr) {
 av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
 return NULL;
 }

 cc = (uint8_t *)av_malloc(cc_count * 3);
 if (cc == NULL) {
 av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
 return NULL;
 }

 /* Copy the cc_data triplets (marker bits of byte 0 intentionally kept). */
 for (size_t i = 0; i < cc_count; i++) {
 cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
 cc[3*i + 1] = cdp[9 + 3*i+1];
 cc[3*i + 2] = cdp[9 + 3*i+2];
 }

 /* Report the extracted size in bytes, not triplets. */
 cc_count *= 3;
 return cc;
}
417 
/**
 * Walk one unpacked 10-bit VANC line and extract known metadata packets:
 * OP-47 teletext (DID 0x43, SDID 0x02/0x03) is appended to tgt, and
 * CEA-708 captions (DID 0x61, SDID 0x01) are attached to pkt as
 * AV_PKT_DATA_A53_CC side data. Unknown packets are logged and skipped.
 *
 * @param avctx    log/option context (reads cctx->teletext_lines)
 * @param buf      line of 10-bit words (parity bits still present)
 * @param width    number of words in buf
 * @param tgt      output buffer for teletext data units
 * @param tgt_size remaining space in tgt
 * @param pkt      video packet to receive caption side data
 * @return the advanced tgt pointer (unchanged if nothing was appended)
 */
static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
 uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
{
 decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
 uint16_t *max_buf = buf + width;

 /* Iterate over consecutive ADF-prefixed packets on the line. */
 while (buf < max_buf - 6) {
 int len;
 uint16_t did = buf[3] & 0xFF; // data id
 uint16_t sdid = buf[4] & 0xFF; // secondary data id
 /* Check for VANC header (Ancillary Data Flag 0x000 0x3ff 0x3ff);
 * anything else means no further packets on this line. */
 if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
 return tgt;
 }

 /* data count + 6 header words + 1 checksum word */
 len = (buf[5] & 0xff) + 6 + 1;
 if (len > max_buf - buf) {
 av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
 len, max_buf - buf);
 return tgt;
 }

 if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
 width == 1920 && tgt_size >= 1920) {
 if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
 av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
 goto skip_packet;
 }
 tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
 } else if (did == 0x61 && sdid == 0x01) {
 /* CEA-708 captions in a CDP */
 unsigned int data_len;
 uint8_t *data;
 if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
 av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
 goto skip_packet;
 }
 clear_parity_bits(buf, len);
 data = vanc_to_cc(avctx, buf, width, data_len);
 if (data) {
 /* Side data takes ownership on success; free on failure. */
 if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
 av_free(data);
 }
 } else {
 av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
 did, sdid);
 }
skip_packet:
 /* Advance past this packet (also the landing point for bad packets). */
 buf += len;
 }

 return tgt;
}
470 
472 {
473  struct decklink_cctx *ctx = (struct decklink_cctx *)avctx->priv_data;
474  memset(q, 0, sizeof(AVPacketQueue));
477  q->avctx = avctx;
478  q->max_q_size = ctx->queue_size;
479 }
480 
482 {
483  AVPacketList *pkt, *pkt1;
484 
486  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
487  pkt1 = pkt->next;
488  av_packet_unref(&pkt->pkt);
489  av_freep(&pkt);
490  }
491  q->last_pkt = NULL;
492  q->first_pkt = NULL;
493  q->nb_packets = 0;
494  q->size = 0;
496 }
497 
499 {
503 }
504 
505 static unsigned long long avpacket_queue_size(AVPacketQueue *q)
506 {
507  unsigned long long size;
509  size = q->size;
511  return size;
512 }
513 
515 {
516  AVPacketList *pkt1;
517 
518  // Drop Packet if queue size is > maximum queue size
519  if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
520  av_packet_unref(pkt);
521  av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
522  return -1;
523  }
524  /* ensure the packet is reference counted */
525  if (av_packet_make_refcounted(pkt) < 0) {
526  av_packet_unref(pkt);
527  return -1;
528  }
529 
530  pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
531  if (!pkt1) {
532  av_packet_unref(pkt);
533  return -1;
534  }
535  av_packet_move_ref(&pkt1->pkt, pkt);
536  pkt1->next = NULL;
537 
539 
540  if (!q->last_pkt) {
541  q->first_pkt = pkt1;
542  } else {
543  q->last_pkt->next = pkt1;
544  }
545 
546  q->last_pkt = pkt1;
547  q->nb_packets++;
548  q->size += pkt1->pkt.size + sizeof(*pkt1);
549 
551 
553  return 0;
554 }
555 
557 {
558  AVPacketList *pkt1;
559  int ret;
560 
562 
563  for (;; ) {
564  pkt1 = q->first_pkt;
565  if (pkt1) {
566  q->first_pkt = pkt1->next;
567  if (!q->first_pkt) {
568  q->last_pkt = NULL;
569  }
570  q->nb_packets--;
571  q->size -= pkt1->pkt.size + sizeof(*pkt1);
572  *pkt = pkt1->pkt;
573  av_free(pkt1);
574  ret = 1;
575  break;
576  } else if (!block) {
577  ret = 0;
578  break;
579  } else {
580  pthread_cond_wait(&q->cond, &q->mutex);
581  }
582  }
584  return ret;
585 }
586 
/**
 * Collect KLV fragments (DID 0x44 / SDID 0x04) from a frame's VANC ancillary
 * packets, reassemble them per MID in PSC order, and queue the joined payload
 * as one packet on ctx->klv_st with the video frame's pts.
 *
 * Fragments with an out-of-order packet sequence counter (PSC) cause the
 * partially assembled data for that MID to be discarded. Frames without the
 * ancillary-packets interface are silently ignored.
 */
static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
{
 const uint8_t KLV_DID = 0x44;
 const uint8_t KLV_IN_VANC_SDID = 0x04;

 /* One reassembly buffer entry per received fragment. */
 struct KLVPacket
 {
 uint16_t sequence_counter;
 std::vector<uint8_t> data;
 };

 size_t total_size = 0;
 /* Indexed by MID (one byte), each holding that MID's in-order fragments. */
 std::vector<std::vector<KLVPacket>> klv_packets(256);

 IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
 if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
 return;

 IDeckLinkAncillaryPacketIterator *it = nullptr;
 if (packets->GetPacketIterator(&it) != S_OK) {
 packets->Release();
 return;
 }

 IDeckLinkAncillaryPacket *packet = nullptr;
 while (it->Next(&packet) == S_OK) {
 uint8_t *data = nullptr;
 uint32_t size = 0;

 if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
 av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());

 if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
 // MID and PSC
 if (size > 3) {
 /* Fragment header: 1-byte MID, 2-byte big-endian PSC. */
 uint8_t mid = data[0];
 uint16_t psc = data[1] << 8 | data[2];

 av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);

 auto& list = klv_packets[mid];
 /* PSC is 1-based and must arrive strictly in sequence. */
 uint16_t expected_psc = list.size() + 1;

 if (psc == expected_psc) {
 uint32_t data_len = size - 3;
 total_size += data_len;

 KLVPacket packet{ psc };
 packet.data.resize(data_len);
 memcpy(packet.data.data(), data + 3, data_len);

 list.push_back(std::move(packet));
 } else {
 av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);

 /* Drop everything collected so far for this MID. */
 if (!list.empty()) {
 for (auto& klv : list)
 total_size -= klv.data.size();

 list.clear();
 }
 }
 }
 }
 }

 packet->Release();
 }

 it->Release();
 packets->Release();

 if (total_size > 0) {
 /* Join all fragments of all MIDs into one contiguous payload. */
 std::vector<uint8_t> klv;
 klv.reserve(total_size);

 for (size_t i = 0; i < klv_packets.size(); ++i) {
 auto& list = klv_packets[i];

 if (list.empty())
 continue;

 av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);

 for (auto& packet : list)
 klv.insert(klv.end(), packet.data.begin(), packet.data.end());
 }

 AVPacket klv_packet;
 av_init_packet(&klv_packet);
 klv_packet.pts = pts;
 klv_packet.dts = pts;
 klv_packet.flags |= AV_PKT_FLAG_KEY;
 klv_packet.stream_index = ctx->klv_st->index;
 /* klv is stack-owned; the queue makes the packet refcounted (copies the
 * payload) before this vector goes out of scope. */
 klv_packet.data = klv.data();
 klv_packet.size = klv.size();

 if (avpacket_queue_put(&ctx->queue, &klv_packet) < 0) {
 ++ctx->dropped;
 }
 }
}
689 
690 class decklink_input_callback : public IDeckLinkInputCallback
691 {
692 public:
695 
696  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
697  virtual ULONG STDMETHODCALLTYPE AddRef(void);
698  virtual ULONG STDMETHODCALLTYPE Release(void);
699  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
700  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
701 
702 private:
703  std::atomic<int> _refs;
706  int no_video;
709 };
710 
712 {
713  avctx = _avctx;
714  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
715  ctx = (struct decklink_ctx *)cctx->ctx;
716  no_video = 0;
718 }
719 
721 {
722 }
723 
725 {
726  return ++_refs;
727 }
728 
730 {
731  int ret = --_refs;
732  if (!ret)
733  delete this;
734  return ret;
735 }
736 
737 static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
738  IDeckLinkAudioInputPacket *audioFrame,
739  int64_t wallclock,
740  int64_t abs_wallclock,
741  DecklinkPtsSource pts_src,
742  AVRational time_base, int64_t *initial_pts,
743  int copyts)
744 {
745  int64_t pts = AV_NOPTS_VALUE;
746  BMDTimeValue bmd_pts;
747  BMDTimeValue bmd_duration;
748  HRESULT res = E_INVALIDARG;
749  switch (pts_src) {
750  case PTS_SRC_AUDIO:
751  if (audioFrame)
752  res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
753  break;
754  case PTS_SRC_VIDEO:
755  if (videoFrame)
756  res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
757  break;
758  case PTS_SRC_REFERENCE:
759  if (videoFrame)
760  res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
761  break;
762  case PTS_SRC_WALLCLOCK:
763  /* fall through */
765  {
766  /* MSVC does not support compound literals like AV_TIME_BASE_Q
767  * in C++ code (compiler error C4576) */
768  AVRational timebase;
769  timebase.num = 1;
770  timebase.den = AV_TIME_BASE;
771  if (pts_src == PTS_SRC_WALLCLOCK)
772  pts = av_rescale_q(wallclock, timebase, time_base);
773  else
774  pts = av_rescale_q(abs_wallclock, timebase, time_base);
775  break;
776  }
777  }
778  if (res == S_OK)
779  pts = bmd_pts / time_base.num;
780 
781  if (!copyts) {
782  if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
783  *initial_pts = pts;
784  if (*initial_pts != AV_NOPTS_VALUE)
785  pts -= *initial_pts;
786  }
787 
788  return pts;
789 }
790 
792  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
793 {
794  void *frameBytes;
795  void *audioFrameBytes;
796  BMDTimeValue frameTime;
797  BMDTimeValue frameDuration;
798  int64_t wallclock = 0, abs_wallclock = 0;
799  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
800 
801  if (ctx->autodetect) {
802  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
803  ctx->bmd_mode == bmdModeUnknown)
804  {
806  }
807  return S_OK;
808  }
809 
810  // Drop the frames till system's timestamp aligns with the configured value.
811  if (0 == ctx->frameCount && cctx->timestamp_align) {
812  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
813  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
814  if (av_cmp_q(remainder, frame_duration) > 0) {
815  ++ctx->dropped;
816  return S_OK;
817  }
818  }
819 
820  ctx->frameCount++;
822  wallclock = av_gettime_relative();
824  abs_wallclock = av_gettime();
825 
826  // Handle Video Frame
827  if (videoFrame) {
828  AVPacket pkt;
829  av_init_packet(&pkt);
830  if (ctx->frameCount % 25 == 0) {
831  unsigned long long qsize = avpacket_queue_size(&ctx->queue);
833  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
834  ctx->frameCount,
835  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
836  (double)qsize / 1024 / 1024);
837  }
838 
839  videoFrame->GetBytes(&frameBytes);
840  videoFrame->GetStreamTime(&frameTime, &frameDuration,
842 
843  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
844  if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
845  unsigned bars[8] = {
846  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
847  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
848  int width = videoFrame->GetWidth();
849  int height = videoFrame->GetHeight();
850  unsigned *p = (unsigned *)frameBytes;
851 
852  for (int y = 0; y < height; y++) {
853  for (int x = 0; x < width; x += 2)
854  *p++ = bars[(x * 8) / width];
855  }
856  }
857 
858  if (!no_video) {
859  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
860  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
861  }
862  no_video = 1;
863  } else {
864  if (no_video) {
865  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
866  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
867  }
868  no_video = 0;
869 
870  // Handle Timecode (if requested)
871  if (ctx->tc_format) {
872  IDeckLinkTimecode *timecode;
873  if (videoFrame->GetTimecode(ctx->tc_format, &timecode) == S_OK) {
874  const char *tc = NULL;
875  DECKLINK_STR decklink_tc;
876  if (timecode->GetString(&decklink_tc) == S_OK) {
877  tc = DECKLINK_STRDUP(decklink_tc);
878  DECKLINK_FREE(decklink_tc);
879  }
880  timecode->Release();
881  if (tc) {
882  AVDictionary* metadata_dict = NULL;
883  int metadata_len;
884  uint8_t* packed_metadata;
885  if (av_dict_set(&metadata_dict, "timecode", tc, AV_DICT_DONT_STRDUP_VAL) >= 0) {
886  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
887  av_dict_free(&metadata_dict);
888  if (packed_metadata) {
889  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
890  av_freep(&packed_metadata);
891  else if (!ctx->tc_seen)
893  }
894  }
895  }
896  } else {
897  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
898  }
899  }
900  }
901 
902  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
903 
904  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
905  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
906  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
907  return S_OK;
908  }
909 
910  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
911  pkt.dts = pkt.pts;
912 
913  pkt.duration = frameDuration;
914  //To be made sure it still applies
915  pkt.flags |= AV_PKT_FLAG_KEY;
916  pkt.stream_index = ctx->video_st->index;
917  pkt.data = (uint8_t *)frameBytes;
918  pkt.size = videoFrame->GetRowBytes() *
919  videoFrame->GetHeight();
920  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
921 
922  if (!no_video) {
923  IDeckLinkVideoFrameAncillary *vanc;
924  AVPacket txt_pkt;
925  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
926  uint8_t *txt_buf = txt_buf0;
927 
928  if (ctx->enable_klv) {
929  handle_klv(avctx, ctx, videoFrame, pkt.pts);
930  }
931 
932  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
933  int i;
934  int64_t line_mask = 1;
935  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
936  txt_buf[0] = 0x10; // data_identifier - EBU_data
937  txt_buf++;
938 #if CONFIG_LIBZVBI
939  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
940  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
941  av_assert0(videoFrame->GetWidth() == 720);
942  for (i = 6; i < 336; i++, line_mask <<= 1) {
943  uint8_t *buf;
944  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
945  if (vanc_format == bmdFormat8BitYUV)
946  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
947  else
948  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
949  }
950  if (i == 22)
951  i = 317;
952  }
953  }
954 #endif
955  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
956  int idx = get_vanc_line_idx(ctx->bmd_mode);
957  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
958  uint8_t *buf;
959  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
960  uint16_t vanc[MAX_WIDTH_VANC];
961  size_t vanc_size = videoFrame->GetWidth();
962  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
963  vanc_size = vanc_size * 2;
964  unpack_v210(vanc, buf, videoFrame->GetWidth());
965  } else {
966  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
967  }
968  txt_buf = get_metadata(avctx, vanc, vanc_size,
969  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
970  }
971  if (i == vanc_line_numbers[idx].field0_vanc_end)
972  i = vanc_line_numbers[idx].field1_vanc_start - 1;
973  }
974  }
975  vanc->Release();
976  if (txt_buf - txt_buf0 > 1) {
977  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
978  while (stuffing_units--) {
979  memset(txt_buf, 0xff, 46);
980  txt_buf[1] = 0x2c; // data_unit_length
981  txt_buf += 46;
982  }
983  av_init_packet(&txt_pkt);
984  txt_pkt.pts = pkt.pts;
985  txt_pkt.dts = pkt.dts;
986  txt_pkt.stream_index = ctx->teletext_st->index;
987  txt_pkt.data = txt_buf0;
988  txt_pkt.size = txt_buf - txt_buf0;
989  if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
990  ++ctx->dropped;
991  }
992  }
993  }
994  }
995 
996  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
997  if (pkt.buf)
998  videoFrame->AddRef();
999 
1000  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1001  ++ctx->dropped;
1002  }
1003  }
1004 
1005  // Handle Audio Frame
1006  if (audioFrame) {
1007  AVPacket pkt;
1008  BMDTimeValue audio_pts;
1009  av_init_packet(&pkt);
1010 
1011  //hack among hacks
1012  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
1013  audioFrame->GetBytes(&audioFrameBytes);
1014  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
1015  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
1016  pkt.dts = pkt.pts;
1017 
1018  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
1019  pkt.flags |= AV_PKT_FLAG_KEY;
1020  pkt.stream_index = ctx->audio_st->index;
1021  pkt.data = (uint8_t *)audioFrameBytes;
1022 
1023  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1024  ++ctx->dropped;
1025  }
1026  }
1027 
1028  return S_OK;
1029 }
1030 
1032  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
1033  BMDDetectedVideoInputFormatFlags)
1034 {
1035  ctx->bmd_mode = mode->GetDisplayMode();
1036  return S_OK;
1037 }
1038 
1039 static int decklink_autodetect(struct decklink_cctx *cctx) {
1040  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1041  DECKLINK_BOOL autodetect_supported = false;
1042  int i;
1043 
1044  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
1045  return -1;
1046  if (autodetect_supported == false)
1047  return -1;
1048 
1049  ctx->autodetect = 1;
1050  ctx->bmd_mode = bmdModeUnknown;
1051  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
1052  bmdFormat8BitYUV,
1053  bmdVideoInputEnableFormatDetection) != S_OK) {
1054  return -1;
1055  }
1056 
1057  if (ctx->dli->StartStreams() != S_OK) {
1058  return -1;
1059  }
1060 
1061  // 3 second timeout
1062  for (i = 0; i < 30; i++) {
1063  av_usleep(100000);
1064  /* Sometimes VideoInputFrameArrived is called without the
1065  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
1066  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
1067  if (ctx->bmd_mode != bmdModeUnknown &&
1069  break;
1070  }
1071 
1072  ctx->dli->PauseStreams();
1073  ctx->dli->FlushStreams();
1074  ctx->autodetect = 0;
1075  if (ctx->bmd_mode != bmdModeUnknown) {
1076  cctx->format_code = (char *)av_mallocz(5);
1077  if (!cctx->format_code)
1078  return -1;
1079  AV_WB32(cctx->format_code, ctx->bmd_mode);
1080  return 0;
1081  } else {
1082  return -1;
1083  }
1084 
1085 }
1086 
1087 extern "C" {
1088 
1090 {
1091  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1092  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1093 
1094  if (ctx->dli) {
1095  ctx->dli->StopStreams();
1096  ctx->dli->DisableVideoInput();
1097  ctx->dli->DisableAudioInput();
1098  }
1099 
1100  ff_decklink_cleanup(avctx);
1102 
1103  av_freep(&cctx->ctx);
1104 
1105  return 0;
1106 }
1107 
1109 {
1110  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1111  struct decklink_ctx *ctx;
1112  class decklink_allocator *allocator;
1114  AVStream *st;
1115  HRESULT result;
1116  int ret;
1117 
1118  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1119  if (!ctx)
1120  return AVERROR(ENOMEM);
1121  ctx->list_devices = cctx->list_devices;
1122  ctx->list_formats = cctx->list_formats;
1123  ctx->enable_klv = cctx->enable_klv;
1125  ctx->preroll = cctx->preroll;
1126  ctx->duplex_mode = cctx->duplex_mode;
1127  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1129  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1131  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1135  ctx->draw_bars = cctx->draw_bars;
1136  ctx->audio_depth = cctx->audio_depth;
1137  cctx->ctx = ctx;
1138 
1139  /* Check audio channel option for valid values: 2, 8 or 16 */
1140  switch (cctx->audio_channels) {
1141  case 2:
1142  case 8:
1143  case 16:
1144  break;
1145  default:
1146  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1147  return AVERROR(EINVAL);
1148  }
1149 
1150  /* Check audio bit depth option for valid values: 16 or 32 */
1151  switch (cctx->audio_depth) {
1152  case 16:
1153  case 32:
1154  break;
1155  default:
1156  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1157  return AVERROR(EINVAL);
1158  }
1159 
1160  /* List available devices. */
1161  if (ctx->list_devices) {
1162  av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. Please use ffmpeg -sources decklink instead.\n");
1163  ff_decklink_list_devices_legacy(avctx, 1, 0);
1164  return AVERROR_EXIT;
1165  }
1166 
1167  ret = ff_decklink_init_device(avctx, avctx->url);
1168  if (ret < 0)
1169  return ret;
1170 
1171  /* Get input device. */
1172  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1173  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1174  avctx->url);
1175  ret = AVERROR(EIO);
1176  goto error;
1177  }
1178 
1179  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1180  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1181  ret = AVERROR(EIO);
1182  goto error;
1183  }
1184 
1185  /* List supported formats. */
1186  if (ctx->list_formats) {
1188  ret = AVERROR_EXIT;
1189  goto error;
1190  }
1191 
1193  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1194  input_callback->Release();
1195  if (ret < 0) {
1196  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1197  goto error;
1198  }
1199 
1200  allocator = new decklink_allocator();
1201  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1202  allocator->Release();
1203  if (ret < 0) {
1204  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1205  goto error;
1206  }
1207 
1208  if (!cctx->format_code) {
1209  if (decklink_autodetect(cctx) < 0) {
1210  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1211  ret = AVERROR(EIO);
1212  goto error;
1213  }
1214  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1215  }
1216  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1217  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1218  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1219  ret = AVERROR(EIO);
1220  goto error;
1221  }
1222 
1223 #if !CONFIG_LIBZVBI
1224  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1225  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1226  ret = AVERROR(ENOSYS);
1227  goto error;
1228  }
1229 #endif
1230 
1231  /* Setup streams. */
1232  st = avformat_new_stream(avctx, NULL);
1233  if (!st) {
1234  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1235  ret = AVERROR(ENOMEM);
1236  goto error;
1237  }
1238  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1239  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1240  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1241  st->codecpar->channels = cctx->audio_channels;
1242  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1243  ctx->audio_st=st;
1244 
1245  st = avformat_new_stream(avctx, NULL);
1246  if (!st) {
1247  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1248  ret = AVERROR(ENOMEM);
1249  goto error;
1250  }
1251  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1252  st->codecpar->width = ctx->bmd_width;
1253  st->codecpar->height = ctx->bmd_height;
1254 
1255  st->time_base.den = ctx->bmd_tb_den;
1256  st->time_base.num = ctx->bmd_tb_num;
1257  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1258 
1259  switch((BMDPixelFormat)cctx->raw_format) {
1260  case bmdFormat8BitYUV:
1261  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1262  st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
1263  st->codecpar->format = AV_PIX_FMT_UYVY422;
1264  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1265  break;
1266  case bmdFormat10BitYUV:
1267  st->codecpar->codec_id = AV_CODEC_ID_V210;
1268  st->codecpar->codec_tag = MKTAG('V','2','1','0');
1269  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1270  st->codecpar->bits_per_coded_sample = 10;
1271  break;
1272  case bmdFormat8BitARGB:
1273  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1274  st->codecpar->format = AV_PIX_FMT_0RGB;
1275  st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
1276  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1277  break;
1278  case bmdFormat8BitBGRA:
1279  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1280  st->codecpar->format = AV_PIX_FMT_BGR0;
1281  st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
1282  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1283  break;
1284  case bmdFormat10BitRGB:
1285  st->codecpar->codec_id = AV_CODEC_ID_R210;
1286  st->codecpar->codec_tag = MKTAG('R','2','1','0');
1287  st->codecpar->format = AV_PIX_FMT_RGB48LE;
1288  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1289  st->codecpar->bits_per_coded_sample = 10;
1290  break;
1291  default:
1292  av_log(avctx, AV_LOG_ERROR, "Raw Format %.4s not supported\n", (char*) &cctx->raw_format);
1293  ret = AVERROR(EINVAL);
1294  goto error;
1295  }
1296 
1297  switch (ctx->bmd_field_dominance) {
1298  case bmdUpperFieldFirst:
1299  st->codecpar->field_order = AV_FIELD_TT;
1300  break;
1301  case bmdLowerFieldFirst:
1302  st->codecpar->field_order = AV_FIELD_BB;
1303  break;
1304  case bmdProgressiveFrame:
1305  case bmdProgressiveSegmentedFrame:
1306  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1307  break;
1308  }
1309 
1310  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1311 
1312  ctx->video_st=st;
1313 
1314  if (ctx->enable_klv) {
1315  st = avformat_new_stream(avctx, NULL);
1316  if (!st) {
1317  ret = AVERROR(ENOMEM);
1318  goto error;
1319  }
1320  st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
1321  st->time_base.den = ctx->bmd_tb_den;
1322  st->time_base.num = ctx->bmd_tb_num;
1323  st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
1324  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1325  ctx->klv_st = st;
1326  }
1327 
1328  if (ctx->teletext_lines) {
1329  st = avformat_new_stream(avctx, NULL);
1330  if (!st) {
1331  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1332  ret = AVERROR(ENOMEM);
1333  goto error;
1334  }
1335  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1336  st->time_base.den = ctx->bmd_tb_den;
1337  st->time_base.num = ctx->bmd_tb_num;
1338  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1339  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1340  ctx->teletext_st = st;
1341  }
1342 
1343  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
1344  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1345 
1346  if (result != S_OK) {
1347  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1348  ret = AVERROR(EIO);
1349  goto error;
1350  }
1351 
1352  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1353  (BMDPixelFormat) cctx->raw_format,
1354  bmdVideoInputFlagDefault);
1355 
1356  if (result != S_OK) {
1357  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1358  ret = AVERROR(EIO);
1359  goto error;
1360  }
1361 
1362  avpacket_queue_init (avctx, &ctx->queue);
1363 
1364  if (ctx->dli->StartStreams() != S_OK) {
1365  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1366  ret = AVERROR(EIO);
1367  goto error;
1368  }
1369 
1370  return 0;
1371 
1372 error:
1373  ff_decklink_cleanup(avctx);
1374  return ret;
1375 }
1376 
1378 {
1379  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1380  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1381 
1382  avpacket_queue_get(&ctx->queue, pkt, 1);
1383 
1384  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
1385  int size;
1386  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
1387  if (side_metadata) {
1388  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1389  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1390  }
1391  }
1392 
1393  return 0;
1394 }
1395 
1397 {
1398  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1399 }
1400 
1401 } /* extern "C" */
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
#define NULL
Definition: coverity.c:32
static int shift(int a, int b)
Definition: sonic.c:82
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
#define pthread_mutex_lock(a)
Definition: ffprobe.c:62
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: utils.c:4938
const uint8_t ff_reverse[256]
Definition: reverse.c:23
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
int num
Numerator.
Definition: rational.h:59
int index
stream index in AVFormatContext
Definition: avformat.h:877
int size
Definition: packet.h:356
BMDDisplayMode mode
Convenience header that includes libavutil&#39;s core.
#define tc
Definition: regdef.h:69
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
static AVPacket pkt
static void error(const char *err)
pthread_cond_t cond
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:144
Format I/O context.
Definition: avformat.h:1351
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
Opaque data information usually continuous.
Definition: avutil.h:203
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:529
pthread_mutex_t mutex
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt)
Return a value representing the fourCC code associated to the pixel format pix_fmt, or 0 if no associated fourCC code can be found.
Definition: raw.c:304
AVPacket pkt
Definition: avformat.h:2025
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:373
Definition: mxf.h:67
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:4519
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
uint8_t * data
Definition: packet.h:355
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s it
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:663
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
ptrdiff_t size
Definition: opengl_enc.c:100
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
Main libavdevice API header.
#define src
Definition: vp8dsp.c:254
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
static const uint16_t mask[17]
Definition: lzw.c:38
AVPacketList * last_pkt
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
char * url
input or output URL.
Definition: avformat.h:1447
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:338
Definition: graph2dot.c:48
simple assert() macros that are a bit more flexible than ISO C assert().
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define FFMIN(a, b)
Definition: common.h:96
AVPacketList * first_pkt
unsigned long long size
#define width
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:671
AVFormatContext * ctx
Definition: movenc.c:48
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
AVDictionary * metadata
Definition: avformat.h:940
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:66
static volatile int checksum
Definition: adler32.c:30
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
#define FF_ARRAY_ELEMS(a)
if(ret)
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
Stream structure.
Definition: avformat.h:876
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
A list of zero terminated key/value strings.
Definition: packet.h:172
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
uint8_t * av_packet_pack_dictionary(AVDictionary *dict, int *size)
Pack a dictionary for use in side_data.
Definition: avpacket.c:494
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
Definition: mmaldec.c:200
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
Rational number (pair of numerator and denominator).
Definition: rational.h:58
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
int64_t max_q_size
AVFormatContext * avctx
List of devices.
Definition: avdevice.h:460
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
#define av_parity
Definition: intmath.h:158
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Main libavformat public API header.
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:33
struct AVPacketList * next
Definition: avformat.h:2026
common internal and external API header
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:133
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define av_free(p)
int len
void * priv_data
Format private data.
Definition: avformat.h:1379
int channels
Audio only.
Definition: codec_par.h:166
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:354
and forward the result(frame or status change) to the corresponding input.If nothing is possible
#define av_freep(p)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1023
int stream_index
Definition: packet.h:357
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:905
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
#define MKTAG(a, b, c, d)
Definition: common.h:406
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:237
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1000
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:332
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
int i
Definition: input.c:406
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
GLuint buffer
Definition: opengl_enc.c:101