FFmpeg
decklink_dec.cpp
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <atomic>
25 #include <vector>
26 using std::atomic;
27 
28 /* Include internal.h first to avoid conflict between winsock.h (used by
29  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
30 extern "C" {
31 #include "libavformat/internal.h"
32 }
33 
34 #include <DeckLinkAPI.h>
35 
36 extern "C" {
37 #include "config.h"
39 #include "libavformat/avformat.h"
40 #include "libavutil/avassert.h"
41 #include "libavutil/avutil.h"
42 #include "libavutil/common.h"
43 #include "libavutil/internal.h"
44 #include "libavutil/imgutils.h"
45 #include "libavutil/intreadwrite.h"
46 #include "libavutil/time.h"
47 #include "libavutil/timecode.h"
48 #include "libavutil/mathematics.h"
49 #include "libavutil/reverse.h"
50 #include "avdevice.h"
51 #if CONFIG_LIBZVBI
52 #include <libzvbi.h>
53 #endif
54 }
55 
56 #include "decklink_common.h"
57 #include "decklink_dec.h"
58 
59 #define MAX_WIDTH_VANC 1920
60 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
61 
62 typedef struct VANCLineNumber {
63  BMDDisplayMode mode;
64  int vanc_start;
65  int field0_vanc_end;
66  int field1_vanc_start;
67  int vanc_end;
68 } VANCLineNumber;
69 
70 /* These VANC line numbers need not be very accurate. In any case
71  * GetBufferForVerticalBlankingLine() will return an error when invalid
72  * ancillary line number was requested. We just need to make sure that the
73  * entire VANC region is covered, while making sure we don't decode VANC of
74  * another source during switching*/
75 static const VANCLineNumber vanc_line_numbers[] = {
76  /* SD Modes */
77 
78  {bmdModeNTSC, 11, 19, 274, 282},
79  {bmdModeNTSC2398, 11, 19, 274, 282},
80  {bmdModePAL, 7, 22, 320, 335},
81  {bmdModeNTSCp, 11, -1, -1, 39},
82  {bmdModePALp, 7, -1, -1, 45},
83 
84  /* HD 1080 Modes */
85 
86  {bmdModeHD1080p2398, 8, -1, -1, 42},
87  {bmdModeHD1080p24, 8, -1, -1, 42},
88  {bmdModeHD1080p25, 8, -1, -1, 42},
89  {bmdModeHD1080p2997, 8, -1, -1, 42},
90  {bmdModeHD1080p30, 8, -1, -1, 42},
91  {bmdModeHD1080i50, 8, 20, 570, 585},
92  {bmdModeHD1080i5994, 8, 20, 570, 585},
93  {bmdModeHD1080i6000, 8, 20, 570, 585},
94  {bmdModeHD1080p50, 8, -1, -1, 42},
95  {bmdModeHD1080p5994, 8, -1, -1, 42},
96  {bmdModeHD1080p6000, 8, -1, -1, 42},
97 
98  /* HD 720 Modes */
99 
100  {bmdModeHD720p50, 8, -1, -1, 26},
101  {bmdModeHD720p5994, 8, -1, -1, 26},
102  {bmdModeHD720p60, 8, -1, -1, 26},
103 
104  /* For all other modes, for which we don't support VANC */
105  {bmdModeUnknown, 0, -1, -1, -1}
106 };
107 
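/* Note on the table above: each entry lists, in order, the first VANC line of
 * field 0 (vanc_start), the last VANC line before field 0 active video
 * (field0_vanc_end), the first VANC line of field 1 (field1_vanc_start) and
 * the last VANC line of field 1 (vanc_end). Progressive modes use -1 for the
 * field 0 / field 1 split, so the capture loop below simply scans from
 * vanc_start through vanc_end without jumping over active video. */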
108 class decklink_allocator : public IDeckLinkMemoryAllocator
109 {
110 public:
111  decklink_allocator(): _refs(1) { }
112  virtual ~decklink_allocator() { }
113 
114  // IDeckLinkMemoryAllocator methods
115  virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
116  {
117  void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
118  if (!buf)
119  return E_OUTOFMEMORY;
120  *allocatedBuffer = buf;
121  return S_OK;
122  }
123  virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
124  {
125  av_free(buffer);
126  return S_OK;
127  }
128  virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
129  virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
130 
131  // IUnknown methods
132  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
133  virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
134  virtual ULONG STDMETHODCALLTYPE Release(void)
135  {
136  int ret = --_refs;
137  if (!ret)
138  delete this;
139  return ret;
140  }
141 
142 private:
143  std::atomic<int> _refs;
144 };
145 
146 extern "C" {
147 static void decklink_object_free(void *opaque, uint8_t *data)
148 {
149  IUnknown *obj = (class IUnknown *)opaque;
150  obj->Release();
151 }
152 }
153 
154 static int get_vanc_line_idx(BMDDisplayMode mode)
155 {
156  unsigned int i;
157  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
158  if (mode == vanc_line_numbers[i].mode)
159  return i;
160  }
161  /* Return the VANC idx for Unknown mode */
162  return i - 1;
163 }
164 
165 static inline void clear_parity_bits(uint16_t *buf, int len) {
166  int i;
167  for (i = 0; i < len; i++)
168  buf[i] &= 0xff;
169 }
170 
171 static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
172  int i;
173  uint16_t vanc_sum = 0;
174  for (i = 3; i < len - 1; i++) {
175  uint16_t v = buf[i];
176  int np = v >> 8;
177  int p = av_parity(v & 0xff);
178  if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
179  // Parity check failed
180  return -1;
181  }
182  vanc_sum += v;
183  }
184  vanc_sum &= 0x1ff;
185  vanc_sum |= ((~vanc_sum & 0x100) << 1);
186  if (checksum != vanc_sum) {
187  // Checksum verification failed
188  return -1;
189  }
190  return 0;
191 }
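/* Note: in 10-bit ancillary data words, bits 0-7 carry the payload, bit 8 is
 * the even-parity bit of bits 0-7 and bit 9 is the inverse of bit 8, so the
 * top two bits of a valid word are always 01 or 10. The checksum word holds
 * the 9-bit sum of the payload words with bit 9 again set to the inverse of
 * bit 8, which is what the ((~vanc_sum & 0x100) << 1) step above rebuilds
 * before comparing. */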
192 
193 /* The 10-bit VANC data is packed in V210, we only need the luma component. */
194 static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
195 {
196  int i;
197  for (i = 0; i < width / 3; i++) {
198  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
199  *dst++ = src[4] + ((src[5] & 3) << 8);
200  *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
201  src += 8;
202  }
203 }
204 
205 static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
206 {
207  int i;
208  for (i = 0; i < width * 2 / 3; i++) {
209  *dst++ = src[0] + ((src[1] & 3) << 8);
210  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
211  *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
212  src += 4;
213  }
214 }
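/* Note on the two V210 helpers above: V210 packs three 10-bit components into
 * each 32-bit little-endian word (bits 0-9, 10-19 and 20-29), with components
 * alternating chroma/luma across the stream. extract_luma_from_v210() keeps
 * only every second component (the luma samples), reading two words (8 bytes)
 * per three output values, while unpack_v210() returns every component in
 * order, one word (4 bytes) per three output values. */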
215 
216 static uint8_t calc_parity_and_line_offset(int line)
217 {
218  uint8_t ret = (line < 313) << 5;
219  if (line >= 7 && line <= 22)
220  ret += line;
221  if (line >= 320 && line <= 335)
222  ret += (line - 313);
223  return ret;
224 }
225 
226 static void fill_data_unit_head(int line, uint8_t *tgt)
227 {
228  tgt[0] = 0x02; // data_unit_id
229  tgt[1] = 0x2c; // data_unit_length
230  tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
231  tgt[3] = 0xe4; // framing code
232 }
233 
234 #if CONFIG_LIBZVBI
235 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
236 {
237  vbi_bit_slicer slicer;
238 
239  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
240 
241  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
242  return tgt;
243 
244  fill_data_unit_head(line, tgt);
245 
246  return tgt + 46;
247 }
248 
249 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
250 {
251  uint8_t y[720];
252  uint8_t *py = y;
253  uint8_t *pend = y + 720;
254  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
255  * so we extract the 8 MSBs of the luma component, that is enough for
256  * teletext bit slicing. */
257  while (py < pend) {
258  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
259  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
260  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
261  src += 8;
262  }
263  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
264 }
265 #endif
266 
267 static uint8_t* teletext_data_unit_from_op47_vbi_packet(int line, uint16_t *py, uint8_t *tgt)
268 {
269  int i;
270 
271  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
272  return tgt;
273 
274  fill_data_unit_head(line, tgt);
275 
276  py += 3;
277  tgt += 4;
278 
279  for (i = 0; i < 42; i++)
280  *tgt++ = ff_reverse[py[i] & 255];
281 
282  return tgt;
283 }
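/* Note: the check above matches the OP-47 run-in (two 0x55 clock run-in bytes
 * followed by the 0x27 framing code, seen here as 10-bit words with their
 * parity bits set). Each recovered teletext line is emitted as a 46-byte DVB
 * teletext data unit: the 4-byte header written by fill_data_unit_head()
 * followed by 42 payload bytes, bit-reversed via ff_reverse because DVB
 * expects LSB-first byte ordering. */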
284 
285 static int linemask_matches(int line, int64_t mask)
286 {
287  int shift = -1;
288  if (line >= 6 && line <= 22)
289  shift = line - 6;
290  if (line >= 318 && line <= 335)
291  shift = line - 318 + 17;
292  return shift >= 0 && ((1ULL << shift) & mask);
293 }
294 
295 static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
296 {
297  if (py < pend - 9) {
298  if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
299  uint16_t *descriptors = py + 4;
300  int i;
301  py += 9;
302  for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
303  int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
304  if (line && linemask_matches(line, wanted_lines))
305  tgt = teletext_data_unit_from_op47_vbi_packet(line, py, tgt);
306  }
307  }
308  }
309  return tgt;
310 }
311 
312 static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
313 {
314  uint16_t did = py[0]; // data id
315  uint16_t sdid = py[1]; // secondary data id
316  uint16_t dc = py[2] & 255; // data count
317  py += 3;
318  pend = FFMIN(pend, py + dc);
319  if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
320  tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
321  } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
322  py += 2; // priority, line/field
323  while (py < pend - 3) {
324  tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
325  py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
326  }
327  }
328  return tgt;
329 }
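/* Note: the DID/SDID constants above (0x143/0x102 and 0x143/0x203) are the
 * 8-bit values 0x43/0x02 (OP-47 subtitle distribution packet) and 0x43/0x03
 * (VANC multipacket) with their parity/inverse-parity bits still attached,
 * since this helper is run on raw 10-bit VANC words before any call to
 * clear_parity_bits(). */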
330 
331 static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
332  unsigned &cc_count)
333 {
334  size_t i, len = (buf[5] & 0xff) + 6 + 1;
335  uint8_t cdp_sum, rate;
336  uint16_t hdr, ftr;
337  uint8_t *cc;
338  uint16_t *cdp = &buf[6]; // CDP follows
339  if (cdp[0] != 0x96 || cdp[1] != 0x69) {
340  av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
341  return NULL;
342  }
343 
344  len -= 7; // remove VANC header and checksum
345 
346  if (cdp[2] != len) {
347  av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
348  return NULL;
349  }
350 
351  cdp_sum = 0;
352  for (i = 0; i < len - 1; i++)
353  cdp_sum += cdp[i];
354  cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
355  if (cdp[len - 1] != cdp_sum) {
356  av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
357  return NULL;
358  }
359 
360  rate = cdp[3];
361  if (!(rate & 0x0f)) {
362  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
363  return NULL;
364  }
365  rate >>= 4;
366  if (rate > 8) {
367  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
368  return NULL;
369  }
370 
371  if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
372  av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
373  return NULL;
374  }
375 
376  hdr = (cdp[5] << 8) | cdp[6];
377  if (cdp[7] != 0x72) /* ccdata_id */ {
378  av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
379  return NULL;
380  }
381 
382  cc_count = cdp[8];
383  if (!(cc_count & 0xe0)) {
384  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
385  return NULL;
386  }
387 
388  cc_count &= 0x1f;
389  if ((len - 13) < cc_count * 3) {
390  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
391  return NULL;
392  }
393 
394  if (cdp[len - 4] != 0x74) /* footer id */ {
395  av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
396  return NULL;
397  }
398 
399  ftr = (cdp[len - 3] << 8) | cdp[len - 2];
400  if (ftr != hdr) {
401  av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
402  return NULL;
403  }
404 
405  cc = (uint8_t *)av_malloc(cc_count * 3);
406  if (cc == NULL) {
407  av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
408  return NULL;
409  }
410 
411  for (size_t i = 0; i < cc_count; i++) {
412  cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
413  cc[3*i + 1] = cdp[9 + 3*i+1];
414  cc[3*i + 2] = cdp[9 + 3*i+2];
415  }
416 
417  cc_count *= 3;
418  return cc;
419 }
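/* Note on vanc_to_cc() above: after the VANC header the payload is a SMPTE
 * 334-2 caption distribution packet (CDP): the 0x9669 identifier, a length
 * byte, a frame-rate nibble, flags, a 16-bit header sequence counter, then a
 * ccdata section (id 0x72, marker bits plus a 5-bit cc_count and cc_count
 * three-byte cc_data triplets) and a footer (id 0x74, a sequence counter that
 * must match the header, and a packet checksum). The validated triplets are
 * returned so get_metadata() can attach them as A53/CEA-708 side data. */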
420 
421 static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
422  uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
423 {
424  decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
425  uint16_t *max_buf = buf + width;
426 
427  while (buf < max_buf - 6) {
428  int len;
429  uint16_t did = buf[3] & 0xFF; // data id
430  uint16_t sdid = buf[4] & 0xFF; // secondary data id
431  /* Check for VANC header */
432  if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
433  return tgt;
434  }
435 
436  len = (buf[5] & 0xff) + 6 + 1;
437  if (len > max_buf - buf) {
438  av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
439  len, max_buf - buf);
440  return tgt;
441  }
442 
443  if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
444  width == 1920 && tgt_size >= 1920) {
445  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
446  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
447  goto skip_packet;
448  }
449  tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
450  } else if (did == 0x61 && sdid == 0x01) {
451  unsigned int data_len;
452  uint8_t *data;
453  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
454  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
455  goto skip_packet;
456  }
457  clear_parity_bits(buf, len);
458  data = vanc_to_cc(avctx, buf, width, data_len);
459  if (data) {
460  if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
461  av_free(data);
462  }
463  } else {
464  av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
465  did, sdid);
466  }
467 skip_packet:
468  buf += len;
469  }
470 
471  return tgt;
472 }
473 
474 static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
475 {
476  struct decklink_cctx *ctx = (struct decklink_cctx *)avctx->priv_data;
477  memset(q, 0, sizeof(AVPacketQueue));
478  pthread_mutex_init(&q->mutex, NULL);
479  pthread_cond_init(&q->cond, NULL);
480  q->avctx = avctx;
481  q->max_q_size = ctx->queue_size;
482 }
483 
484 static void avpacket_queue_flush(AVPacketQueue *q)
485 {
486  PacketListEntry *pkt, *pkt1;
487 
488  pthread_mutex_lock(&q->mutex);
489  for (pkt = q->pkt_list.head; pkt != NULL; pkt = pkt1) {
490  pkt1 = pkt->next;
491  av_packet_unref(&pkt->pkt);
492  av_freep(&pkt);
493  }
494  q->pkt_list.head = NULL;
495  q->pkt_list.tail = NULL;
496  q->nb_packets = 0;
497  q->size = 0;
498  pthread_mutex_unlock(&q->mutex);
499 }
500 
501 static void avpacket_queue_end(AVPacketQueue *q)
502 {
503  avpacket_queue_flush(q);
504  pthread_mutex_destroy(&q->mutex);
505  pthread_cond_destroy(&q->cond);
506 }
507 
508 static unsigned long long avpacket_queue_size(AVPacketQueue *q)
509 {
510  unsigned long long size;
511  pthread_mutex_lock(&q->mutex);
512  size = q->size;
513  pthread_mutex_unlock(&q->mutex);
514  return size;
515 }
516 
517 static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
518 {
519  PacketListEntry *pkt1;
520 
521  // Drop Packet if queue size is > maximum queue size
522  if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
523  av_packet_unref(pkt);
524  av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
525  return -1;
526  }
527  /* ensure the packet is reference counted */
528  if (av_packet_make_refcounted(pkt) < 0) {
529  av_packet_unref(pkt);
530  return -1;
531  }
532 
533  pkt1 = (PacketListEntry *)av_malloc(sizeof(*pkt1));
534  if (!pkt1) {
535  av_packet_unref(pkt);
536  return -1;
537  }
538  av_packet_move_ref(&pkt1->pkt, pkt);
539  pkt1->next = NULL;
540 
541  pthread_mutex_lock(&q->mutex);
542 
543  if (!q->pkt_list.tail) {
544  q->pkt_list.head = pkt1;
545  } else {
546  q->pkt_list.tail->next = pkt1;
547  }
548 
549  q->pkt_list.tail = pkt1;
550  q->nb_packets++;
551  q->size += pkt1->pkt.size + sizeof(*pkt1);
552 
553  pthread_cond_broadcast(&q->cond);
554 
555  pthread_mutex_unlock(&q->mutex);
556  return 0;
557 }
558 
559 static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
560 {
561  int ret;
562 
563  pthread_mutex_lock(&q->mutex);
564 
565  for (;; ) {
566  PacketListEntry *pkt1 = q->pkt_list.head;
567  if (pkt1) {
568  q->pkt_list.head = pkt1->next;
569  if (!q->pkt_list.head) {
570  q->pkt_list.tail = NULL;
571  }
572  q->nb_packets--;
573  q->size -= pkt1->pkt.size + sizeof(*pkt1);
574  *pkt = pkt1->pkt;
575  av_free(pkt1);
576  ret = 1;
577  break;
578  } else if (!block) {
579  ret = 0;
580  break;
581  } else {
582  pthread_cond_wait(&q->cond, &q->mutex);
583  }
584  }
585  pthread_mutex_unlock(&q->mutex);
586  return ret;
587 }
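/* Note: this AVPacketQueue is a mutex/condition-protected FIFO shared between
 * the DeckLink driver callback thread (producer, avpacket_queue_put) and
 * ff_decklink_read_packet (consumer, avpacket_queue_get with block=1). Rather
 * than stalling the driver callback when the consumer falls behind, put()
 * drops the incoming packet once the queued byte count exceeds the queue_size
 * option and logs an input buffer overrun. */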
588 
589 static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
590 {
591  const uint8_t KLV_DID = 0x44;
592  const uint8_t KLV_IN_VANC_SDID = 0x04;
593 
594  struct KLVPacket
595  {
596  uint16_t sequence_counter;
597  std::vector<uint8_t> data;
598  };
599 
600  size_t total_size = 0;
601  std::vector<std::vector<KLVPacket>> klv_packets(256);
602 
603  IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
604  if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
605  return;
606 
607  IDeckLinkAncillaryPacketIterator *it = nullptr;
608  if (packets->GetPacketIterator(&it) != S_OK) {
609  packets->Release();
610  return;
611  }
612 
613  IDeckLinkAncillaryPacket *packet = nullptr;
614  while (it->Next(&packet) == S_OK) {
615  uint8_t *data = nullptr;
616  uint32_t size = 0;
617 
618  if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
619  av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());
620 
621  if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
622  // MID and PSC
623  if (size > 3) {
624  uint8_t mid = data[0];
625  uint16_t psc = data[1] << 8 | data[2];
626 
627  av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);
628 
629  auto& list = klv_packets[mid];
630  uint16_t expected_psc = list.size() + 1;
631 
632  if (psc == expected_psc) {
633  uint32_t data_len = size - 3;
634  total_size += data_len;
635 
636  KLVPacket packet{ psc };
637  packet.data.resize(data_len);
638  memcpy(packet.data.data(), data + 3, data_len);
639 
640  list.push_back(std::move(packet));
641  } else {
642  av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);
643 
644  if (!list.empty()) {
645  for (auto& klv : list)
646  total_size -= klv.data.size();
647 
648  list.clear();
649  }
650  }
651  }
652  }
653  }
654 
655  packet->Release();
656  }
657 
658  it->Release();
659  packets->Release();
660 
661  if (total_size > 0) {
662  std::vector<uint8_t> klv;
663  klv.reserve(total_size);
664 
665  for (size_t i = 0; i < klv_packets.size(); ++i) {
666  auto& list = klv_packets[i];
667 
668  if (list.empty())
669  continue;
670 
671  av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);
672 
673  for (auto& packet : list)
674  klv.insert(klv.end(), packet.data.begin(), packet.data.end());
675  }
676 
677  AVPacket klv_packet = { 0 };
678  klv_packet.pts = pts;
679  klv_packet.dts = pts;
680  klv_packet.flags |= AV_PKT_FLAG_KEY;
681  klv_packet.stream_index = ctx->klv_st->index;
682  klv_packet.data = klv.data();
683  klv_packet.size = klv.size();
684 
685  if (avpacket_queue_put(&ctx->queue, &klv_packet) < 0) {
686  ++ctx->dropped;
687  }
688  }
689 }
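/* Note on handle_klv() above: KLV metadata can span several VANC packets
 * (DID 0x44 / SDID 0x04), each fragment prefixed by a MID identifying the
 * metadata stream and a 1-based packet sequence counter (PSC). Fragments are
 * collected per MID, discarded if a PSC arrives out of order, then
 * concatenated in MID order and queued as a single packet on the SMPTE KLV
 * data stream. */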
690 
691 class decklink_input_callback : public IDeckLinkInputCallback
692 {
693 public:
694  decklink_input_callback(AVFormatContext *_avctx);
695  ~decklink_input_callback();
696 
697  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
698  virtual ULONG STDMETHODCALLTYPE AddRef(void);
699  virtual ULONG STDMETHODCALLTYPE Release(void);
700  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
701  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
702 
703 private:
704  std::atomic<int> _refs;
705  AVFormatContext *avctx;
706  decklink_ctx *ctx;
707  int no_video;
708  int64_t initial_audio_pts;
709  int64_t initial_video_pts;
710 };
711 
712 decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : _refs(1)
713 {
714  avctx = _avctx;
715  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
716  ctx = (struct decklink_ctx *)cctx->ctx;
717  no_video = 0;
718  initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
719 }
720 
721 decklink_input_callback::~decklink_input_callback()
722 {
723 }
724 
725 ULONG decklink_input_callback::AddRef(void)
726 {
727  return ++_refs;
728 }
729 
730 ULONG decklink_input_callback::Release(void)
731 {
732  int ret = --_refs;
733  if (!ret)
734  delete this;
735  return ret;
736 }
737 
738 static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
739  IDeckLinkAudioInputPacket *audioFrame,
740  int64_t wallclock,
741  int64_t abs_wallclock,
742  DecklinkPtsSource pts_src,
743  AVRational time_base, int64_t *initial_pts,
744  int copyts)
745 {
746  int64_t pts = AV_NOPTS_VALUE;
747  BMDTimeValue bmd_pts;
748  BMDTimeValue bmd_duration;
749  HRESULT res = E_INVALIDARG;
750  switch (pts_src) {
751  case PTS_SRC_AUDIO:
752  if (audioFrame)
753  res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
754  break;
755  case PTS_SRC_VIDEO:
756  if (videoFrame)
757  res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
758  break;
759  case PTS_SRC_REFERENCE:
760  if (videoFrame)
761  res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
762  break;
763  case PTS_SRC_WALLCLOCK:
764  /* fall through */
765  case PTS_SRC_ABS_WALLCLOCK:
766  {
767  /* MSVC does not support compound literals like AV_TIME_BASE_Q
768  * in C++ code (compiler error C4576) */
769  AVRational timebase;
770  timebase.num = 1;
771  timebase.den = AV_TIME_BASE;
772  if (pts_src == PTS_SRC_WALLCLOCK)
773  pts = av_rescale_q(wallclock, timebase, time_base);
774  else
775  pts = av_rescale_q(abs_wallclock, timebase, time_base);
776  break;
777  }
778  }
779  if (res == S_OK)
780  pts = bmd_pts / time_base.num;
781 
782  if (!copyts) {
783  if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
784  *initial_pts = pts;
785  if (*initial_pts != AV_NOPTS_VALUE)
786  pts -= *initial_pts;
787  }
788 
789  return pts;
790 }
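/* Note: the pts source is selectable per stream (the audio_pts / video_pts
 * demuxer options): the audio packet time, the video stream time, the
 * hardware reference clock, or the wallclock (relative or absolute, rescaled
 * from microseconds to the stream time base). Unless copyts is set, the first
 * timestamp seen is remembered and subtracted so output timestamps start near
 * zero. */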
791 
792 static int get_bmd_timecode(AVFormatContext *avctx, AVTimecode *tc, AVRational frame_rate, BMDTimecodeFormat tc_format, IDeckLinkVideoInputFrame *videoFrame)
793 {
794  IDeckLinkTimecode *timecode;
795  int ret = AVERROR(ENOENT);
796 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
797  int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);
798 #else
799  int hfr = 0;
800 #endif
801  if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {
802  uint8_t hh, mm, ss, ff;
803  if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {
804  int flags = (timecode->GetFlags() & bmdTimecodeIsDropFrame) ? AV_TIMECODE_FLAG_DROPFRAME : 0;
805  if (!hfr && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1)
806  ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);
807  ret = av_timecode_init_from_components(tc, frame_rate, flags, hh, mm, ss, ff, avctx);
808  }
809  timecode->Release();
810  }
811  return ret;
812 }
813 
814 static int get_frame_timecode(AVFormatContext *avctx, decklink_ctx *ctx, AVTimecode *tc, IDeckLinkVideoInputFrame *videoFrame)
815 {
816  AVRational frame_rate = ctx->video_st->r_frame_rate;
817  int ret;
818  /* 50/60 fps content has alternating VITC1 and VITC2 timecode (see SMPTE ST
819  * 12-2, section 7), so the native ordering of RP188Any (HFR, VITC1, LTC,
820  * VITC2) would not work because LTC might not contain the field flag.
821  * Therefore we query the types manually. */
822  if (ctx->tc_format == bmdTimecodeRP188Any && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1) {
823 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
824  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
825  if (ret == AVERROR(ENOENT))
826 #endif
827  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC1, videoFrame);
828  if (ret == AVERROR(ENOENT))
829  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC2, videoFrame);
830  if (ret == AVERROR(ENOENT))
831  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188LTC, videoFrame);
832  } else {
833  ret = get_bmd_timecode(avctx, tc, frame_rate, ctx->tc_format, videoFrame);
834  }
835  return ret;
836 }
837 
838 HRESULT decklink_input_callback::VideoInputFrameArrived(
839  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
840 {
841  void *frameBytes;
842  void *audioFrameBytes;
843  BMDTimeValue frameTime;
844  BMDTimeValue frameDuration;
845  int64_t wallclock = 0, abs_wallclock = 0;
846  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
847 
848  if (ctx->autodetect) {
849  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
850  ctx->bmd_mode == bmdModeUnknown)
851  {
852  ctx->bmd_mode = AUTODETECT_DEFAULT_MODE;
853  }
854  return S_OK;
855  }
856 
857  // Drop the frames till system's timestamp aligns with the configured value.
858  if (0 == ctx->frameCount && cctx->timestamp_align) {
859  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
860  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
861  if (av_cmp_q(remainder, frame_duration) > 0) {
862  ++ctx->dropped;
863  return S_OK;
864  }
865  }
866 
867  ctx->frameCount++;
868  if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
869  wallclock = av_gettime_relative();
870  if (ctx->audio_pts_source == PTS_SRC_ABS_WALLCLOCK || ctx->video_pts_source == PTS_SRC_ABS_WALLCLOCK)
871  abs_wallclock = av_gettime();
872 
873  // Handle Video Frame
874  if (videoFrame) {
875  AVPacket pkt = { 0 };
876  if (ctx->frameCount % 25 == 0) {
877  unsigned long long qsize = avpacket_queue_size(&ctx->queue);
878  av_log(avctx, AV_LOG_DEBUG,
879  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
880  ctx->frameCount,
881  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
882  (double)qsize / 1024 / 1024);
883  }
884 
885  videoFrame->GetBytes(&frameBytes);
886  videoFrame->GetStreamTime(&frameTime, &frameDuration,
887  ctx->video_st->time_base.den);
888 
889  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
890  if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
891  unsigned bars[8] = {
892  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
893  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
894  int width = videoFrame->GetWidth();
895  int height = videoFrame->GetHeight();
896  unsigned *p = (unsigned *)frameBytes;
897 
898  for (int y = 0; y < height; y++) {
899  for (int x = 0; x < width; x += 2)
900  *p++ = bars[(x * 8) / width];
901  }
902  }
903 
904  if (!no_video) {
905  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
906  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
907  }
908  no_video = 1;
909  } else {
910  if (no_video) {
911  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
912  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
913  }
914  no_video = 0;
915 
916  // Handle Timecode (if requested)
917  if (ctx->tc_format) {
918  AVTimecode tcr;
919  if (get_frame_timecode(avctx, ctx, &tcr, videoFrame) >= 0) {
920  char tcstr[AV_TIMECODE_STR_SIZE];
921  const char *tc = av_timecode_make_string(&tcr, tcstr, 0);
922  if (tc) {
923  AVDictionary* metadata_dict = NULL;
924  uint8_t* packed_metadata;
925 
926  if (av_cmp_q(ctx->video_st->r_frame_rate, av_make_q(60, 1)) < 1) {
927  uint32_t tc_data = av_timecode_get_smpte_from_framenum(&tcr, 0);
928  int size = sizeof(uint32_t) * 4;
929  uint32_t *sd = (uint32_t *)av_packet_new_side_data(&pkt, AV_PKT_DATA_S12M_TIMECODE, size);
930 
931  if (sd) {
932  *sd = 1; // one TC
933  *(sd + 1) = tc_data; // TC
934  }
935  }
936 
937  if (av_dict_set(&metadata_dict, "timecode", tc, 0) >= 0) {
938  size_t metadata_len;
939  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
940  av_dict_free(&metadata_dict);
941  if (packed_metadata) {
942  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
943  av_freep(&packed_metadata);
944  else if (!ctx->tc_seen)
945  ctx->tc_seen = 1;
946  }
947  }
948  }
949  } else {
950  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
951  }
952  }
953  }
954 
955  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
956 
957  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
958  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
959  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
960  return S_OK;
961  }
962 
963  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
964  pkt.dts = pkt.pts;
965 
966  pkt.duration = frameDuration;
967  //To be made sure it still applies
968  pkt.flags |= AV_PKT_FLAG_KEY;
969  pkt.stream_index = ctx->video_st->index;
970  pkt.data = (uint8_t *)frameBytes;
971  pkt.size = videoFrame->GetRowBytes() *
972  videoFrame->GetHeight();
973  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
974 
975  if (!no_video) {
976  IDeckLinkVideoFrameAncillary *vanc;
977  AVPacket txt_pkt = { 0 };
978  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
979  uint8_t *txt_buf = txt_buf0;
980 
981  if (ctx->enable_klv) {
982  handle_klv(avctx, ctx, videoFrame, pkt.pts);
983  }
984 
985  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
986  int i;
987  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
988  txt_buf[0] = 0x10; // data_identifier - EBU_data
989  txt_buf++;
990 #if CONFIG_LIBZVBI
991  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
992  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
993  int64_t line_mask = 1;
994  av_assert0(videoFrame->GetWidth() == 720);
995  for (i = 6; i < 336; i++, line_mask <<= 1) {
996  uint8_t *buf;
997  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
998  if (vanc_format == bmdFormat8BitYUV)
999  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
1000  else
1001  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
1002  }
1003  if (i == 22)
1004  i = 317;
1005  }
1006  }
1007 #endif
1008  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
1009  int idx = get_vanc_line_idx(ctx->bmd_mode);
1010  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
1011  uint8_t *buf;
1012  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
1013  uint16_t vanc[MAX_WIDTH_VANC];
1014  size_t vanc_size = videoFrame->GetWidth();
1015  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
1016  vanc_size = vanc_size * 2;
1017  unpack_v210(vanc, buf, videoFrame->GetWidth());
1018  } else {
1019  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
1020  }
1021  txt_buf = get_metadata(avctx, vanc, vanc_size,
1022  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
1023  }
1024  if (i == vanc_line_numbers[idx].field0_vanc_end)
1025  i = vanc_line_numbers[idx].field1_vanc_start - 1;
1026  }
1027  }
1028  vanc->Release();
1029  if (txt_buf - txt_buf0 > 1) {
1030  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
1031  while (stuffing_units--) {
1032  memset(txt_buf, 0xff, 46);
1033  txt_buf[1] = 0x2c; // data_unit_length
1034  txt_buf += 46;
1035  }
1036  txt_pkt.pts = pkt.pts;
1037  txt_pkt.dts = pkt.dts;
1038  txt_pkt.stream_index = ctx->teletext_st->index;
1039  txt_pkt.data = txt_buf0;
1040  txt_pkt.size = txt_buf - txt_buf0;
1041  if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
1042  ++ctx->dropped;
1043  }
1044  }
1045  }
1046  }
1047 
1048  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
1049  if (pkt.buf)
1050  videoFrame->AddRef();
1051 
1052  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1053  ++ctx->dropped;
1054  }
1055  }
1056 
1057  // Handle Audio Frame
1058  if (audioFrame) {
1059  AVPacket pkt = { 0 };
1060  BMDTimeValue audio_pts;
1061 
1062  //hack among hacks
1063  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
1064  audioFrame->GetBytes(&audioFrameBytes);
1065  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
1066  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
1067  pkt.dts = pkt.pts;
1068 
1069  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
1070  pkt.flags |= AV_PKT_FLAG_KEY;
1071  pkt.stream_index = ctx->audio_st->index;
1072  pkt.data = (uint8_t *)audioFrameBytes;
1073 
1074  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1075  ++ctx->dropped;
1076  }
1077  }
1078 
1079  return S_OK;
1080 }
1081 
1082 HRESULT decklink_input_callback::VideoInputFormatChanged(
1083  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
1084  BMDDetectedVideoInputFormatFlags formatFlags)
1085 {
1086  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
1087  ctx->bmd_mode = mode->GetDisplayMode();
1088  // check the C context member to make sure we set both raw_format and bmd_mode with data from the same format change callback
1089  if (!cctx->raw_format)
1090  ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
1091  return S_OK;
1092 }
1093 
1094 static int decklink_autodetect(struct decklink_cctx *cctx) {
1095  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1096  DECKLINK_BOOL autodetect_supported = false;
1097  int i;
1098 
1099  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
1100  return -1;
1101  if (autodetect_supported == false)
1102  return -1;
1103 
1104  ctx->autodetect = 1;
1105  ctx->bmd_mode = bmdModeUnknown;
1106  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
1107  bmdFormat8BitYUV,
1108  bmdVideoInputEnableFormatDetection) != S_OK) {
1109  return -1;
1110  }
1111 
1112  if (ctx->dli->StartStreams() != S_OK) {
1113  return -1;
1114  }
1115 
1116  // 3 second timeout
1117  for (i = 0; i < 30; i++) {
1118  av_usleep(100000);
1119  /* Sometimes VideoInputFrameArrived is called without the
1120  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
1121  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
1122  if (ctx->bmd_mode != bmdModeUnknown &&
1123  ctx->bmd_mode != AUTODETECT_DEFAULT_MODE)
1124  break;
1125  }
1126 
1127  ctx->dli->PauseStreams();
1128  ctx->dli->FlushStreams();
1129  ctx->autodetect = 0;
1130  if (ctx->bmd_mode != bmdModeUnknown) {
1131  cctx->format_code = (char *)av_mallocz(5);
1132  if (!cctx->format_code)
1133  return -1;
1134  AV_WB32(cctx->format_code, ctx->bmd_mode);
1135  return 0;
1136  } else {
1137  return -1;
1138  }
1139 
1140 }
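/* Note on decklink_autodetect() above: if the card reports
 * BMDDeckLinkSupportsInputFormatDetection, capture is started briefly in the
 * NTSC default mode with format detection enabled; VideoInputFormatChanged()
 * then fills in ctx->bmd_mode, which is polled here for up to ~3 seconds
 * (30 x 100 ms) before the detected mode is written back into format_code as
 * a four-character code. */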
1141 
1142 extern "C" {
1143 
1144 av_cold int ff_decklink_read_close(AVFormatContext *avctx)
1145 {
1146  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1147  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1148 
1149  if (ctx->dli) {
1150  ctx->dli->StopStreams();
1151  ctx->dli->DisableVideoInput();
1152  ctx->dli->DisableAudioInput();
1153  }
1154 
1155  ff_decklink_cleanup(avctx);
1156  avpacket_queue_end(&ctx->queue);
1157 
1158  av_freep(&cctx->ctx);
1159 
1160  return 0;
1161 }
1162 
1163 av_cold int ff_decklink_read_header(AVFormatContext *avctx)
1164 {
1165  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1166  struct decklink_ctx *ctx;
1167  class decklink_allocator *allocator;
1168  class decklink_input_callback *input_callback;
1169  AVStream *st;
1170  HRESULT result;
1171  int ret;
1172 
1173  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1174  if (!ctx)
1175  return AVERROR(ENOMEM);
1176  ctx->list_devices = cctx->list_devices;
1177  ctx->list_formats = cctx->list_formats;
1178  ctx->enable_klv = cctx->enable_klv;
1179  ctx->teletext_lines = cctx->teletext_lines;
1180  ctx->preroll = cctx->preroll;
1181  ctx->duplex_mode = cctx->duplex_mode;
1182  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1183  ctx->tc_format = decklink_timecode_format_map[cctx->tc_format];
1184  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1185  ctx->video_input = decklink_video_connection_map[cctx->video_input];
1186  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1187  ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
1188  ctx->audio_pts_source = cctx->audio_pts_source;
1189  ctx->video_pts_source = cctx->video_pts_source;
1190  ctx->draw_bars = cctx->draw_bars;
1191  ctx->audio_depth = cctx->audio_depth;
1192  if (cctx->raw_format > 0 && (unsigned int)cctx->raw_format < FF_ARRAY_ELEMS(decklink_raw_format_map))
1193  ctx->raw_format = decklink_raw_format_map[cctx->raw_format];
1194  cctx->ctx = ctx;
1195 
1196  /* Check audio channel option for valid values: 2, 8 or 16 */
1197  switch (cctx->audio_channels) {
1198  case 2:
1199  case 8:
1200  case 16:
1201  break;
1202  default:
1203  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1204  return AVERROR(EINVAL);
1205  }
1206 
1207  /* Check audio bit depth option for valid values: 16 or 32 */
1208  switch (cctx->audio_depth) {
1209  case 16:
1210  case 32:
1211  break;
1212  default:
1213  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1214  return AVERROR(EINVAL);
1215  }
1216 
1217  /* List available devices. */
1218  if (ctx->list_devices) {
1219  ff_decklink_list_devices_legacy(avctx, 1, 0);
1220  return AVERROR_EXIT;
1221  }
1222 
1223  ret = ff_decklink_init_device(avctx, avctx->url);
1224  if (ret < 0)
1225  return ret;
1226 
1227  /* Get input device. */
1228  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1229  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1230  avctx->url);
1231  ret = AVERROR(EIO);
1232  goto error;
1233  }
1234 
1235  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1236  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1237  ret = AVERROR(EIO);
1238  goto error;
1239  }
1240 
1241  /* List supported formats. */
1242  if (ctx->list_formats) {
1243  ff_decklink_list_formats(avctx, DIRECTION_IN);
1244  ret = AVERROR_EXIT;
1245  goto error;
1246  }
1247 
1248  input_callback = new decklink_input_callback(avctx);
1249  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1250  input_callback->Release();
1251  if (ret < 0) {
1252  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1253  goto error;
1254  }
1255 
1256  allocator = new decklink_allocator();
1257  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1258  allocator->Release();
1259  if (ret < 0) {
1260  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1261  goto error;
1262  }
1263 
1264  if (!cctx->format_code) {
1265  if (decklink_autodetect(cctx) < 0) {
1266  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1267  ret = AVERROR(EIO);
1268  goto error;
1269  }
1270  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1271  }
1272  if (ctx->raw_format == (BMDPixelFormat)0)
1273  ctx->raw_format = bmdFormat8BitYUV;
1274  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1275  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1276  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1277  ret = AVERROR(EIO);
1278  goto error;
1279  }
1280 
1281 #if !CONFIG_LIBZVBI
1282  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1283  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1284  ret = AVERROR(ENOSYS);
1285  goto error;
1286  }
1287 #endif
1288 
1289  /* Setup streams. */
1290  st = avformat_new_stream(avctx, NULL);
1291  if (!st) {
1292  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1293  ret = AVERROR(ENOMEM);
1294  goto error;
1295  }
1296  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1297  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1298  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1299  st->codecpar->channels = cctx->audio_channels;
1300  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1301  ctx->audio_st=st;
1302 
1303  st = avformat_new_stream(avctx, NULL);
1304  if (!st) {
1305  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1306  ret = AVERROR(ENOMEM);
1307  goto error;
1308  }
1309  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1310  st->codecpar->width = ctx->bmd_width;
1311  st->codecpar->height = ctx->bmd_height;
1312 
1313  st->time_base.den = ctx->bmd_tb_den;
1314  st->time_base.num = ctx->bmd_tb_num;
1315  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1316 
1317  switch(ctx->raw_format) {
1318  case bmdFormat8BitYUV:
1319  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1320  st->codecpar->format = AV_PIX_FMT_UYVY422;
1321  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1322  break;
1323  case bmdFormat10BitYUV:
1324  st->codecpar->codec_id = AV_CODEC_ID_V210;
1325  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1326  break;
1327  case bmdFormat8BitARGB:
1328  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1329  st->codecpar->format = AV_PIX_FMT_0RGB;
1330  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1331  break;
1332  case bmdFormat8BitBGRA:
1333  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1334  st->codecpar->format = AV_PIX_FMT_BGR0;
1335  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1336  break;
1337  case bmdFormat10BitRGB:
1338  st->codecpar->codec_id = AV_CODEC_ID_R210;
1339  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1340  break;
1341  default:
1342  char fourcc_str[AV_FOURCC_MAX_STRING_SIZE] = {0};
1343  av_fourcc_make_string(fourcc_str, ctx->raw_format);
1344  av_log(avctx, AV_LOG_ERROR, "Raw Format %s not supported\n", fourcc_str);
1345  ret = AVERROR(EINVAL);
1346  goto error;
1347  }
1348 
1349  switch (ctx->bmd_field_dominance) {
1350  case bmdUpperFieldFirst:
1351  st->codecpar->field_order = AV_FIELD_TT;
1352  break;
1353  case bmdLowerFieldFirst:
1354  st->codecpar->field_order = AV_FIELD_BB;
1355  break;
1356  case bmdProgressiveFrame:
1357  case bmdProgressiveSegmentedFrame:
1358  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1359  break;
1360  }
1361 
1362  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1363 
1364  ctx->video_st=st;
1365 
1366  if (ctx->enable_klv) {
1367  st = avformat_new_stream(avctx, NULL);
1368  if (!st) {
1369  ret = AVERROR(ENOMEM);
1370  goto error;
1371  }
1372  st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
1373  st->time_base.den = ctx->bmd_tb_den;
1374  st->time_base.num = ctx->bmd_tb_num;
1375  st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
1376  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1377  ctx->klv_st = st;
1378  }
1379 
1380  if (ctx->teletext_lines) {
1381  st = avformat_new_stream(avctx, NULL);
1382  if (!st) {
1383  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1384  ret = AVERROR(ENOMEM);
1385  goto error;
1386  }
1387  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1388  st->time_base.den = ctx->bmd_tb_den;
1389  st->time_base.num = ctx->bmd_tb_num;
1390  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1391  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1392  ctx->teletext_st = st;
1393  }
1394 
1395  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
1396  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1397 
1398  if (result != S_OK) {
1399  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1400  ret = AVERROR(EIO);
1401  goto error;
1402  }
1403 
1404  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1405  ctx->raw_format,
1406  bmdVideoInputFlagDefault);
1407 
1408  if (result != S_OK) {
1409  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1410  ret = AVERROR(EIO);
1411  goto error;
1412  }
1413 
1414  avpacket_queue_init (avctx, &ctx->queue);
1415 
1416  if (ctx->dli->StartStreams() != S_OK) {
1417  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1418  ret = AVERROR(EIO);
1419  goto error;
1420  }
1421 
1422  return 0;
1423 
1424 error:
1425  ff_decklink_cleanup(avctx);
1426  return ret;
1427 }
1428 
1429 int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
1430 {
1431  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1432  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1433 
1434  avpacket_queue_get(&ctx->queue, pkt, 1);
1435 
1436  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
1437  size_t size;
1438  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
1439  if (side_metadata) {
1440  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1441  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1442  }
1443  }
1444 
1445  return 0;
1446 }
1447 
1448 int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
1449 {
1450  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1451 }
1452 
1453 } /* extern "C" */
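/* Illustrative usage (not part of this file; device names and available modes
 * depend on the local DeckLink setup):
 *
 *   ffmpeg -f decklink -list_devices 1 -i dummy
 *   ffmpeg -f decklink -list_formats 1 -i 'DeckLink Mini Recorder'
 *   ffmpeg -f decklink -format_code Hi50 -i 'DeckLink Mini Recorder' -c:v ffv1 out.mkv
 *
 * When -format_code is omitted, ff_decklink_read_header() falls back to the
 * autodetection path implemented above. */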
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
AV_CODEC_ID_PCM_S16LE
@ AV_CODEC_ID_PCM_S16LE
Definition: codec_id.h:314
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:424
VANCLineNumber::vanc_end
int vanc_end
Definition: decklink_dec.cpp:67
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
PacketList::head
PacketListEntry * head
Definition: packet_internal.h:32
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
avformat_new_stream
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:768
pthread_mutex_init
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
AV_CODEC_ID_RAWVIDEO
@ AV_CODEC_ID_RAWVIDEO
Definition: codec_id.h:63
AVPacket::data
uint8_t * data
Definition: packet.h:373
AV_CODEC_ID_DVB_TELETEXT
@ AV_CODEC_ID_DVB_TELETEXT
Definition: codec_id.h:529
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
data
const char data[16]
Definition: mxf.c:143
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:391
reverse.h
mathematics.h
AVDictionary
Definition: dict.c:30
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:428
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:39
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:198
AV_FOURCC_MAX_STRING_SIZE
#define AV_FOURCC_MAX_STRING_SIZE
Definition: avutil.h:346
AVPacketQueue::pkt_list
PacketList pkt_list
Definition: decklink_common.h:81
timecode.h
AV_CODEC_ID_R210
@ AV_CODEC_ID_R210
Definition: codec_id.h:183
av_timecode_make_string
char * av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum)
Load timecode string in buf.
Definition: timecode.c:102
pts
static int64_t pts
Definition: transcode_aac.c:653
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
AVRational::num
int num
Numerator.
Definition: rational.h:59
AV_PKT_DATA_STRINGS_METADATA
@ AV_PKT_DATA_STRINGS_METADATA
A list of zero terminated key/value strings.
Definition: packet.h:172
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1429
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
width
#define width
AVPacketQueue
Definition: decklink_common.h:80
intreadwrite.h
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_parity
#define av_parity
Definition: common.h:156
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
PacketList::tail
PacketListEntry * tail
Definition: packet_internal.h:32
if
if(ret)
Definition: filter_design.txt:179
AVFormatContext
Format I/O context.
Definition: avformat.h:1200
internal.h
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1095
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:356
result
and forward the result(frame or status change) to the corresponding input. If nothing is possible
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:965
NULL
#define NULL
Definition: coverity.c:32
VANCLineNumber::field1_vanc_start
int field1_vanc_start
Definition: decklink_dec.cpp:66
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
src
#define src
Definition: vp8dsp.c:255
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
VANCLineNumber::field0_vanc_end
int field0_vanc_end
Definition: decklink_dec.cpp:65
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:230
time.h
AV_CODEC_ID_SMPTE_KLV
@ AV_CODEC_ID_SMPTE_KLV
Definition: codec_id.h:559
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:481
PacketListEntry::next
struct PacketListEntry * next
Definition: packet_internal.h:27
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:68
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
av_timecode_init_from_components
int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx)
Init a timecode struct from the passed timecode components.
Definition: timecode.c:229
AVPacket::size
int size
Definition: packet.h:374
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1283
size
int size
Definition: twinvq_data.h:10344
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
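A minimal usage sketch (not taken from this file): av_make_q() is commonly paired with av_inv_q() to turn a frame rate into a time base. The helper name ntsc_time_base below is purely illustrative.

#include <libavutil/rational.h>

static AVRational ntsc_time_base(void)
{
    /* 30000/1001 (29.97) fps; the corresponding time base is simply its inverse. */
    AVRational frame_rate = av_make_q(30000, 1001);
    return av_inv_q(frame_rate);   /* 1001/30000 */
}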
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
av_fourcc_make_string
char * av_fourcc_make_string(char *buf, uint32_t fourcc)
Fill the provided buffer with a string containing a FourCC (four-character code) representation.
Definition: utils.c:116
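A short hedged example of av_fourcc_make_string() (the helper log_fourcc is hypothetical): the caller supplies a buffer of at least AV_FOURCC_MAX_STRING_SIZE bytes.

#include <stdint.h>
#include <stdio.h>
#include <libavutil/avutil.h>

static void log_fourcc(uint32_t fourcc)
{
    char buf[AV_FOURCC_MAX_STRING_SIZE];   /* 32 bytes, per avutil.h */
    printf("fourcc: %s\n", av_fourcc_make_string(buf, fourcc));
}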
AV_CODEC_ID_V210
@ AV_CODEC_ID_V210
Definition: codec_id.h:177
PacketListEntry::pkt
AVPacket pkt
Definition: packet_internal.h:28
avdevice.h
av_packet_unpack_dictionary
int av_packet_unpack_dictionary(const uint8_t *data, size_t size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:344
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
height
#define height
VANCLineNumber::mode
BMDDisplayMode mode
Definition: decklink_dec.cpp:63
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
line
Definition: graph2dot.c:48
av_packet_pack_dictionary
uint8_t * av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
Pack a dictionary for use in side_data.
Definition: avpacket.c:309
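As a sketch of how av_packet_pack_dictionary() and av_packet_unpack_dictionary() fit together (the function attach_and_read_metadata and the "timecode" key are illustrative, not from this file), a packed dictionary is typically attached to a packet as AV_PKT_DATA_STRINGS_METADATA side data:

#include <libavcodec/packet.h>
#include <libavutil/dict.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int attach_and_read_metadata(AVPacket *pkt)
{
    AVDictionary *dict = NULL, *out = NULL;
    size_t size;
    uint8_t *buf;
    int ret;

    av_dict_set(&dict, "timecode", "01:00:00:00", 0);
    buf = av_packet_pack_dictionary(dict, &size);
    av_dict_free(&dict);
    if (!buf)
        return AVERROR(ENOMEM);

    /* On success the packet owns the buffer; free it ourselves only on failure. */
    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, buf, size);
    if (ret < 0) {
        av_free(buf);
        return ret;
    }

    /* A consumer can later recover the dictionary from the same packet. */
    buf = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
    if (buf && av_packet_unpack_dictionary(buf, size, &out) >= 0)
        av_dict_free(&out);
    return 0;
}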
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:487
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
pthread_cond_destroy
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:144
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
pthread_mutex_destroy
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
av_timecode_get_smpte_from_framenum
uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum)
Convert frame number to SMPTE 12M binary representation.
Definition: timecode.c:52
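A hedged sketch of av_timecode_get_smpte_from_framenum(): an AVTimecode is first initialized with av_timecode_init() (rate, flags, starting frame, logging context). The 29.97 drop-frame rate and the frame number 1800 are arbitrary example values.

#include <libavutil/timecode.h>

static uint32_t frame_to_smpte(void *log_ctx)
{
    AVTimecode tc;
    AVRational rate = av_make_q(30000, 1001);   /* 29.97 fps, drop-frame */

    if (av_timecode_init(&tc, rate, AV_TIMECODE_FLAG_DROPFRAME, 0, log_ctx) < 0)
        return 0;
    return av_timecode_get_smpte_from_framenum(&tc, 1800);
}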
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
PacketListEntry
Definition: packet_internal.h:26
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:366
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:253
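For illustration (the helper has_a53_cc is hypothetical), av_packet_get_side_data() is how a consumer would check for the AV_PKT_DATA_A53_CC captions that also appear in this index:

#include <libavcodec/packet.h>

static int has_a53_cc(const AVPacket *pkt)
{
    size_t size = 0;
    const uint8_t *cc = av_packet_get_side_data(pkt, AV_PKT_DATA_A53_CC, &size);
    return cc != NULL && size > 0;
}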
VANCLineNumber
Definition: decklink_dec.cpp:62
internal.h
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
common.h
AV_PKT_DATA_A53_CC
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:263
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:128
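A minimal sketch of av_rescale() (the 1/90000 target time base is an assumed example): it computes a*b/c with 64-bit intermediates, so a wallclock value from av_gettime(), which is in AV_TIME_BASE (microsecond) units, can be rescaled without overflow.

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>

static int64_t wallclock_in_90khz(void)
{
    int64_t now_us = av_gettime();                  /* microseconds */
    return av_rescale(now_us, 90000, AV_TIME_BASE); /* convert to 1/90000 ticks */
}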
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
AV_TIMECODE_FLAG_DROPFRAME
@ AV_TIMECODE_FLAG_DROPFRAME
timecode is drop frame
Definition: timecode.h:36
AVPacketQueue::cond
pthread_cond_t cond
Definition: decklink_common.h:86
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:935
KLVPacket
Definition: mxf.h:71
AVDeviceInfoList
List of devices.
Definition: avdevice.h:467
avformat.h
checksum
static volatile int checksum
Definition: adler32.c:30
AV_PKT_DATA_S12M_TIMECODE
@ AV_PKT_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
AVPacketQueue::size
unsigned long long size
Definition: decklink_common.h:83
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:943
input_callback
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
Definition: mmaldec.c:203
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new side-data information for a packet.
Definition: avpacket.c:232
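A hedged sketch of av_packet_new_side_data() for the AV_PKT_DATA_S12M_TIMECODE type listed above; the payload layout assumed here (four uint32_t, the first giving the number of timecodes that follow) matches the documented side-data format, and attach_s12m is an illustrative name.

#include <libavcodec/packet.h>
#include <libavutil/error.h>

static int attach_s12m(AVPacket *pkt, uint32_t smpte_tc)
{
    uint32_t *sd = (uint32_t *)av_packet_new_side_data(pkt, AV_PKT_DATA_S12M_TIMECODE,
                                                       sizeof(uint32_t) * 4);
    if (!sd)
        return AVERROR(ENOMEM);
    sd[0] = 1;         /* one timecode follows */
    sd[1] = smpte_tc;  /* e.g. from av_timecode_get_smpte_from_framenum() */
    return 0;
}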
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AVPacketQueue::nb_packets
int nb_packets
Definition: decklink_common.h:82
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1084
avpriv_set_pts_info
void avpriv_set_pts_info(AVStream *st, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: utils.c:1196
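As an illustration of avpriv_set_pts_info() (an FFmpeg-internal helper declared in libavformat/internal.h, so it is only usable inside the FFmpeg tree; the helper init_stream_timebase is illustrative): capture devices typically request 64-bit timestamps in a microsecond time base.

#include "libavformat/internal.h"

static void init_stream_timebase(AVStream *st)
{
    /* 64 timestamp bits, time base 1/1000000 (microseconds) */
    avpriv_set_pts_info(st, 64, 1, 1000000);
}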
AVPacket::stream_index
int stream_index
Definition: packet.h:375
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
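The pthread wrappers listed on this page are typically combined into a small blocking queue. The following self-contained sketch uses a hypothetical DemoQueue struct (not the AVPacketQueue from decklink_common.h) to show the usual lock / wait-in-a-loop / signal pattern; the mutex and condition variable are assumed to have been initialized with pthread_mutex_init() and pthread_cond_init().

#include <pthread.h>

typedef struct DemoQueue {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    int             have_item;
    int             item;
} DemoQueue;

static void demo_put(DemoQueue *q, int item)
{
    pthread_mutex_lock(&q->mutex);
    q->item      = item;
    q->have_item = 1;
    pthread_cond_signal(&q->cond);      /* wake a waiting consumer */
    pthread_mutex_unlock(&q->mutex);
}

static int demo_get(DemoQueue *q)
{
    int item;
    pthread_mutex_lock(&q->mutex);
    while (!q->have_item)               /* re-check the predicate after each wakeup */
        pthread_cond_wait(&q->cond, &q->mutex);
    item = q->item;
    q->have_item = 0;
    pthread_mutex_unlock(&q->mutex);
    return item;
}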
av_gettime
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
shift
static int shift(int a, int b)
Definition: sonic.c:83
tc
#define tc
Definition: regdef.h:69
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_CODEC_ID_PCM_S32LE
@ AV_CODEC_ID_PCM_S32LE
Definition: codec_id.h:322
avutil.h
AVPacketQueue::avctx
AVFormatContext * avctx
Definition: decklink_common.h:87
packet_internal.h
it
Copy libavfilter/vf_edgedetect.c to libavfilter/vf_foobar.c (renaming EdgeDetect to Foobar), add an entry for foobar to libavfilter/Makefile and libavfilter/allfilters.c following the pattern of the other filters, then configure, make ffmpeg, and run it on a sample image; you should get a foobar.png with Lena edge-detected. That's it.
Definition: writing_filters.txt:31
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacketQueue::mutex
pthread_mutex_t mutex
Definition: decklink_common.h:85
AVPacket
This structure stores compressed data.
Definition: packet.h:350
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
VANCLineNumber::vanc_start
int vanc_start
Definition: decklink_dec.cpp:64
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:227
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
pthread_cond_init
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:133
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVTimecode
Definition: timecode.h:41
AVPacketQueue::max_q_size
int64_t max_q_size
Definition: decklink_common.h:88
AVFormatContext::priv_data
void * priv_data
Format private data.
Definition: avformat.h:1228
line
The official guide to swscale for confused developers: special converters are generally unscaled converters of common formats; for each output line the vertical scaler pulls lines from a ring buffer, and when the ring buffer does not contain the wanted line it must first be produced by the earlier stages of the pipeline.
Definition: swscale.txt:40
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:64