FFmpeg
decklink_dec.cpp
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <atomic>
25 #include <vector>
26 using std::atomic;
27 
28 /* Include internal.h first to avoid conflict between winsock.h (used by
29  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
30 extern "C" {
31 #include "libavformat/internal.h"
32 }
33 
34 #include <DeckLinkAPI.h>
35 
36 extern "C" {
37 #include "config.h"
39 #include "libavformat/avformat.h"
40 #include "libavutil/avassert.h"
41 #include "libavutil/avutil.h"
42 #include "libavutil/common.h"
43 #include "libavutil/internal.h"
44 #include "libavutil/imgutils.h"
45 #include "libavutil/intreadwrite.h"
46 #include "libavutil/time.h"
47 #include "libavutil/timecode.h"
48 #include "libavutil/mathematics.h"
49 #include "libavutil/reverse.h"
50 #include "avdevice.h"
51 #if CONFIG_LIBZVBI
52 #include <libzvbi.h>
53 #endif
54 }
55 
56 #include "decklink_common.h"
57 #include "decklink_dec.h"
58 
59 #define MAX_WIDTH_VANC 1920
60 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
61 
62 typedef struct VANCLineNumber {
63  BMDDisplayMode mode;
64  int vanc_start;
65  int field0_vanc_end;
66  int field1_vanc_start;
67  int vanc_end;
68 } VANCLineNumber;
69 
70 /* These VANC line numbers need not be very accurate. In any case
71  * GetBufferForVerticalBlankingLine() will return an error when an invalid
72  * ancillary line number is requested. We just need to make sure that the
73  * entire VANC region is covered, while making sure we don't decode VANC of
74  * another source during switching. */
75 static const VANCLineNumber vanc_line_numbers[] = {
76  /* SD Modes */
77 
78  {bmdModeNTSC, 11, 19, 274, 282},
79  {bmdModeNTSC2398, 11, 19, 274, 282},
80  {bmdModePAL, 7, 22, 320, 335},
81  {bmdModeNTSCp, 11, -1, -1, 39},
82  {bmdModePALp, 7, -1, -1, 45},
83 
84  /* HD 1080 Modes */
85 
86  {bmdModeHD1080p2398, 8, -1, -1, 42},
87  {bmdModeHD1080p24, 8, -1, -1, 42},
88  {bmdModeHD1080p25, 8, -1, -1, 42},
89  {bmdModeHD1080p2997, 8, -1, -1, 42},
90  {bmdModeHD1080p30, 8, -1, -1, 42},
91  {bmdModeHD1080i50, 8, 20, 570, 585},
92  {bmdModeHD1080i5994, 8, 20, 570, 585},
93  {bmdModeHD1080i6000, 8, 20, 570, 585},
94  {bmdModeHD1080p50, 8, -1, -1, 42},
95  {bmdModeHD1080p5994, 8, -1, -1, 42},
96  {bmdModeHD1080p6000, 8, -1, -1, 42},
97 
98  /* HD 720 Modes */
99 
100  {bmdModeHD720p50, 8, -1, -1, 26},
101  {bmdModeHD720p5994, 8, -1, -1, 26},
102  {bmdModeHD720p60, 8, -1, -1, 26},
103 
104  /* For all other modes, for which we don't support VANC */
105  {bmdModeUnknown, 0, -1, -1, -1}
106 };
107 
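/* Custom memory allocator handed to the DeckLink driver: it over-allocates
 * each capture buffer by AV_INPUT_BUFFER_PADDING_SIZE so that the captured
 * frames can later be wrapped into AVPackets and consumed by libav* code
 * that expects padded buffers. */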
108 class decklink_allocator : public IDeckLinkMemoryAllocator
109 {
110 public:
111  decklink_allocator(): _refs(1) { }
112  virtual ~decklink_allocator() { }
113 
114  // IDeckLinkMemoryAllocator methods
115  virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
116  {
117  void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
118  if (!buf)
119  return E_OUTOFMEMORY;
120  *allocatedBuffer = buf;
121  return S_OK;
122  }
123  virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
124  {
125  av_free(buffer);
126  return S_OK;
127  }
128  virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
129  virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
130 
131  // IUnknown methods
132  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
133  virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
134  virtual ULONG STDMETHODCALLTYPE Release(void)
135  {
136  int ret = --_refs;
137  if (!ret)
138  delete this;
139  return ret;
140  }
141 
142 private:
143  std::atomic<int> _refs;
144 };
145 
146 extern "C" {
147 static void decklink_object_free(void *opaque, uint8_t *data)
148 {
149  IUnknown *obj = (class IUnknown *)opaque;
150  obj->Release();
151 }
152 }
153 
154 static int get_vanc_line_idx(BMDDisplayMode mode)
155 {
156  unsigned int i;
157  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
158  if (mode == vanc_line_numbers[i].mode)
159  return i;
160  }
161  /* Return the VANC idx for Unknown mode */
162  return i - 1;
163 }
164 
165 static inline void clear_parity_bits(uint16_t *buf, int len) {
166  int i;
167  for (i = 0; i < len; i++)
168  buf[i] &= 0xff;
169 }
170 
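/* Each 10-bit ancillary data word carries 8 payload bits, an even-parity bit
 * in bit 8 and the complement of that parity bit in bit 9 (SMPTE ST 291-1).
 * The checksum word is the 9-bit sum of the words following the ancillary
 * data flag, again with bit 9 holding the complement of bit 8; the helper
 * below validates both properties before the payload is trusted. */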
171 static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
172  int i;
173  uint16_t vanc_sum = 0;
174  for (i = 3; i < len - 1; i++) {
175  uint16_t v = buf[i];
176  int np = v >> 8;
177  int p = av_parity(v & 0xff);
178  if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
179  // Parity check failed
180  return -1;
181  }
182  vanc_sum += v;
183  }
184  vanc_sum &= 0x1ff;
185  vanc_sum |= ((~vanc_sum & 0x100) << 1);
186  if (checksum != vanc_sum) {
187  // Checksum verification failed
188  return -1;
189  }
190  return 0;
191 }
192 
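/* V210 packs three 10-bit components into the low 30 bits of each 32-bit
 * little-endian word, six pixels (16 bytes) per group, with luma and chroma
 * interleaved as Cb Y Cr / Y Cb Y / Cr Y Cb / Y Cr Y. The byte-level shifts
 * in the two unpack helpers below simply pick those 10-bit fields out of the
 * byte stream. */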
193 /* The 10-bit VANC data is packed in V210, we only need the luma component. */
194 static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
195 {
196  int i;
197  for (i = 0; i < width / 3; i++) {
198  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
199  *dst++ = src[4] + ((src[5] & 3) << 8);
200  *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
201  src += 8;
202  }
203 }
204 
205 static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
206 {
207  int i;
208  for (i = 0; i < width * 2 / 3; i++) {
209  *dst++ = src[0] + ((src[1] & 3) << 8);
210  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
211  *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
212  src += 4;
213  }
214 }
215 
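/* The DVB teletext data unit stores the VBI line in a single byte: bit 5 is
 * the field parity (1 for the first field) and bits 0-4 the line offset
 * within that field (ETSI EN 300 472). Lines 7-22 belong to the first field,
 * lines 320-335 map to offsets 7-22 of the second field. */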
216 static uint8_t calc_parity_and_line_offset(int line)
217 {
218  uint8_t ret = (line < 313) << 5;
219  if (line >= 7 && line <= 22)
220  ret += line;
221  if (line >= 320 && line <= 335)
222  ret += (line - 313);
223  return ret;
224 }
225 
226 static void fill_data_unit_head(int line, uint8_t *tgt)
227 {
228  tgt[0] = 0x02; // data_unit_id
229  tgt[1] = 0x2c; // data_unit_length
230  tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
231  tgt[3] = 0xe4; // framing code
232 }
233 
234 #if CONFIG_LIBZVBI
235 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
236 {
237  vbi_bit_slicer slicer;
238 
239  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
240 
241  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
242  return tgt;
243 
244  fill_data_unit_head(line, tgt);
245 
246  return tgt + 46;
247 }
248 
249 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
250 {
251  uint8_t y[720];
252  uint8_t *py = y;
253  uint8_t *pend = y + 720;
254  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
255  * so we extract the 8 MSBs of the luma component, that is enough for
256  * teletext bit slicing. */
257  while (py < pend) {
258  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
259  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
260  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
261  src += 8;
262  }
263  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
264 }
265 #endif
266 
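/* OP-47 stores the teletext bytes with the opposite bit order to the DVB
 * teletext data units built here: the clock run-in appears as 0x55 0x55 and
 * the framing code as 0x27 rather than 0xe4 in the 10-bit words checked
 * below (0x255/0x255/0x227 including their parity bits), so each of the 42
 * payload bytes is bit-reversed via ff_reverse[] before being stored. */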
267 static uint8_t* teletext_data_unit_from_op47_vbi_packet(int line, uint16_t *py, uint8_t *tgt)
268 {
269  int i;
270 
271  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
272  return tgt;
273 
274  fill_data_unit_head(line, tgt);
275 
276  py += 3;
277  tgt += 4;
278 
279  for (i = 0; i < 42; i++)
280  *tgt++ = ff_reverse[py[i] & 255];
281 
282  return tgt;
283 }
284 
285 static int linemask_matches(int line, int64_t mask)
286 {
287  int shift = -1;
288  if (line >= 6 && line <= 22)
289  shift = line - 6;
290  if (line >= 318 && line <= 335)
291  shift = line - 318 + 17;
292  return shift >= 0 && ((1ULL << shift) & mask);
293 }
294 
295 static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
296 {
297  if (py < pend - 9) {
298  if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
299  uint16_t *descriptors = py + 4;
300  int i;
301  py += 9;
302  for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
303  int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
304  if (line && linemask_matches(line, wanted_lines))
305  tgt = teletext_data_unit_from_op47_vbi_packet(line, py, tgt);
306  }
307  }
308  }
309  return tgt;
310 }
311 
312 static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
313 {
314  uint16_t did = py[0]; // data id
315  uint16_t sdid = py[1]; // secondary data id
316  uint16_t dc = py[2] & 255; // data count
317  py += 3;
318  pend = FFMIN(pend, py + dc);
319  if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
320  tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
321  } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
322  py += 2; // priority, line/field
323  while (py < pend - 3) {
324  tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
325  py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
326  }
327  }
328  return tgt;
329 }
330 
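/* EIA-708 closed captions arrive in VANC as a SMPTE 334-2 Caption
 * Distribution Packet (DID 0x61, SDID 0x01). The parser below checks the
 * 0x9669 identifier, the declared length, the packet checksum, the frame
 * rate and flag fields, then copies the cc_count 3-byte cc_data triplets
 * (section id 0x72) and verifies that the footer (id 0x74) echoes the
 * header's sequence counter. */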
331 static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
332  unsigned &cc_count)
333 {
334  size_t i, len = (buf[5] & 0xff) + 6 + 1;
335  uint8_t cdp_sum, rate;
336  uint16_t hdr, ftr;
337  uint8_t *cc;
338  uint16_t *cdp = &buf[6]; // CDP follows
339  if (cdp[0] != 0x96 || cdp[1] != 0x69) {
340  av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
341  return NULL;
342  }
343 
344  len -= 7; // remove VANC header and checksum
345 
346  if (cdp[2] != len) {
347  av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
348  return NULL;
349  }
350 
351  cdp_sum = 0;
352  for (i = 0; i < len - 1; i++)
353  cdp_sum += cdp[i];
354  cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
355  if (cdp[len - 1] != cdp_sum) {
356  av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
357  return NULL;
358  }
359 
360  rate = cdp[3];
361  if (!(rate & 0x0f)) {
362  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
363  return NULL;
364  }
365  rate >>= 4;
366  if (rate > 8) {
367  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
368  return NULL;
369  }
370 
371  if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
372  av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
373  return NULL;
374  }
375 
376  hdr = (cdp[5] << 8) | cdp[6];
377  if (cdp[7] != 0x72) /* ccdata_id */ {
378  av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
379  return NULL;
380  }
381 
382  cc_count = cdp[8];
383  if (!(cc_count & 0xe0)) {
384  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
385  return NULL;
386  }
387 
388  cc_count &= 0x1f;
389  if ((len - 13) < cc_count * 3) {
390  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
391  return NULL;
392  }
393 
394  if (cdp[len - 4] != 0x74) /* footer id */ {
395  av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
396  return NULL;
397  }
398 
399  ftr = (cdp[len - 3] << 8) | cdp[len - 2];
400  if (ftr != hdr) {
401  av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
402  return NULL;
403  }
404 
405  cc = (uint8_t *)av_malloc(cc_count * 3);
406  if (cc == NULL) {
407  av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
408  return NULL;
409  }
410 
411  for (size_t i = 0; i < cc_count; i++) {
412  cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
413  cc[3*i + 1] = cdp[9 + 3*i+1];
414  cc[3*i + 2] = cdp[9 + 3*i+2];
415  }
416 
417  cc_count *= 3;
418  return cc;
419 }
420 
421 static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
422  uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
423 {
424  decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
425  uint16_t *max_buf = buf + width;
426 
427  while (buf < max_buf - 6) {
428  int len;
429  uint16_t did = buf[3] & 0xFF; // data id
430  uint16_t sdid = buf[4] & 0xFF; // secondary data id
431  /* Check for VANC header */
432  if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
433  return tgt;
434  }
435 
436  len = (buf[5] & 0xff) + 6 + 1;
437  if (len > max_buf - buf) {
438  av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
439  len, max_buf - buf);
440  return tgt;
441  }
442 
443  if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
444  width == 1920 && tgt_size >= 1920) {
445  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
446  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
447  goto skip_packet;
448  }
449  tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
450  } else if (did == 0x61 && sdid == 0x01) {
451  unsigned int data_len;
452  uint8_t *data;
453  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
454  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
455  goto skip_packet;
456  }
457  clear_parity_bits(buf, len);
458  data = vanc_to_cc(avctx, buf, width, data_len);
459  if (data) {
460  if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
461  av_free(data);
462  }
463  } else {
464  av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
465  did, sdid);
466  }
467 skip_packet:
468  buf += len;
469  }
470 
471  return tgt;
472 }
473 
474 
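/* KLV metadata can be carried in VANC packets with DID 0x44 / SDID 0x04.
 * A KLV unit larger than one packet is split across several of them, each
 * payload starting with a one-byte metadata identifier (MID) and a two-byte
 * packet sequence counter (PSC). The handler below collects the fragments
 * per MID, checks that the PSCs arrive in order, concatenates everything and
 * queues the result as a single packet on the KLV data stream. */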
475 static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
476 {
477  const uint8_t KLV_DID = 0x44;
478  const uint8_t KLV_IN_VANC_SDID = 0x04;
479 
480  struct KLVPacket
481  {
482  uint16_t sequence_counter;
483  std::vector<uint8_t> data;
484  };
485 
486  size_t total_size = 0;
487  std::vector<std::vector<KLVPacket>> klv_packets(256);
488 
489  IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
490  if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
491  return;
492 
493  IDeckLinkAncillaryPacketIterator *it = nullptr;
494  if (packets->GetPacketIterator(&it) != S_OK) {
495  packets->Release();
496  return;
497  }
498 
499  IDeckLinkAncillaryPacket *packet = nullptr;
500  while (it->Next(&packet) == S_OK) {
501  uint8_t *data = nullptr;
502  uint32_t size = 0;
503 
504  if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
505  av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());
506 
507  if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
508  // MID and PSC
509  if (size > 3) {
510  uint8_t mid = data[0];
511  uint16_t psc = data[1] << 8 | data[2];
512 
513  av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);
514 
515  auto& list = klv_packets[mid];
516  uint16_t expected_psc = list.size() + 1;
517 
518  if (psc == expected_psc) {
519  uint32_t data_len = size - 3;
520  total_size += data_len;
521 
522  KLVPacket packet{ psc };
523  packet.data.resize(data_len);
524  memcpy(packet.data.data(), data + 3, data_len);
525 
526  list.push_back(std::move(packet));
527  } else {
528  av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);
529 
530  if (!list.empty()) {
531  for (auto& klv : list)
532  total_size -= klv.data.size();
533 
534  list.clear();
535  }
536  }
537  }
538  }
539  }
540 
541  packet->Release();
542  }
543 
544  it->Release();
545  packets->Release();
546 
547  if (total_size > 0) {
548  std::vector<uint8_t> klv;
549  klv.reserve(total_size);
550 
551  for (size_t i = 0; i < klv_packets.size(); ++i) {
552  auto& list = klv_packets[i];
553 
554  if (list.empty())
555  continue;
556 
557  av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);
558 
559  for (auto& packet : list)
560  klv.insert(klv.end(), packet.data.begin(), packet.data.end());
561  }
562 
563  AVPacket klv_packet = { 0 };
564  klv_packet.pts = pts;
565  klv_packet.dts = pts;
566  klv_packet.flags |= AV_PKT_FLAG_KEY;
567  klv_packet.stream_index = ctx->klv_st->index;
568  klv_packet.data = klv.data();
569  klv_packet.size = klv.size();
570 
571  if (ff_decklink_packet_queue_put(&ctx->queue, &klv_packet) < 0) {
572  ++ctx->dropped;
573  }
574  }
575 }
576 
577 class decklink_input_callback : public IDeckLinkInputCallback
578 {
579 public:
580  explicit decklink_input_callback(AVFormatContext *_avctx);
581  virtual ~decklink_input_callback();
582 
583  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
584  virtual ULONG STDMETHODCALLTYPE AddRef(void);
585  virtual ULONG STDMETHODCALLTYPE Release(void);
586  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
587  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
588 
589 private:
590  std::atomic<int> _refs;
591  AVFormatContext *avctx;
592  decklink_ctx *ctx;
593  int no_video;
594  int64_t initial_audio_pts;
595  int64_t initial_video_pts;
596  IDeckLinkVideoInputFrame* last_video_frame;
597 };
598 
599 decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : _refs(1)
600 {
601  avctx = _avctx;
602  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
603  ctx = (struct decklink_ctx *)cctx->ctx;
604  no_video = 0;
605  initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
606  last_video_frame = nullptr;
607 }
608 
609 decklink_input_callback::~decklink_input_callback()
610 {
611  if (last_video_frame)
612  last_video_frame->Release();
613 }
614 
615 ULONG decklink_input_callback::AddRef(void)
616 {
617  return ++_refs;
618 }
619 
620 ULONG decklink_input_callback::Release(void)
621 {
622  int ret = --_refs;
623  if (!ret)
624  delete this;
625  return ret;
626 }
627 
628 static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
629  IDeckLinkAudioInputPacket *audioFrame,
630  int64_t wallclock,
631  int64_t abs_wallclock,
632  DecklinkPtsSource pts_src,
633  AVRational time_base, int64_t *initial_pts,
634  int copyts)
635 {
636  int64_t pts = AV_NOPTS_VALUE;
637  BMDTimeValue bmd_pts;
638  BMDTimeValue bmd_duration;
639  HRESULT res = E_INVALIDARG;
640  switch (pts_src) {
641  case PTS_SRC_AUDIO:
642  if (audioFrame)
643  res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
644  break;
645  case PTS_SRC_VIDEO:
646  if (videoFrame)
647  res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
648  break;
649  case PTS_SRC_REFERENCE:
650  if (videoFrame)
651  res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
652  break;
653  case PTS_SRC_WALLCLOCK:
654  /* fall through */
655  case PTS_SRC_ABS_WALLCLOCK:
656  {
657  /* MSVC does not support compound literals like AV_TIME_BASE_Q
658  * in C++ code (compiler error C4576) */
659  AVRational timebase;
660  timebase.num = 1;
661  timebase.den = AV_TIME_BASE;
662  if (pts_src == PTS_SRC_WALLCLOCK)
663  pts = av_rescale_q(wallclock, timebase, time_base);
664  else
665  pts = av_rescale_q(abs_wallclock, timebase, time_base);
666  break;
667  }
668  }
669  if (res == S_OK)
670  pts = bmd_pts / time_base.num;
671 
672  if (!copyts) {
673  if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
674  *initial_pts = pts;
675  if (*initial_pts != AV_NOPTS_VALUE)
676  pts -= *initial_pts;
677  }
678 
679  return pts;
680 }
681 
682 static int get_bmd_timecode(AVFormatContext *avctx, AVTimecode *tc, AVRational frame_rate, BMDTimecodeFormat tc_format, IDeckLinkVideoInputFrame *videoFrame)
683 {
684  IDeckLinkTimecode *timecode;
685  int ret = AVERROR(ENOENT);
686 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
687  int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);
688 #else
689  int hfr = 0;
690 #endif
691  if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {
692  uint8_t hh, mm, ss, ff;
693  if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {
694  int flags = (timecode->GetFlags() & bmdTimecodeIsDropFrame) ? AV_TIMECODE_FLAG_DROPFRAME : 0;
695  if (!hfr && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1)
696  ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);
697  ret = av_timecode_init_from_components(tc, frame_rate, flags, hh, mm, ss, ff, avctx);
698  }
699  timecode->Release();
700  }
701  return ret;
702 }
703 
704 static int get_frame_timecode(AVFormatContext *avctx, decklink_ctx *ctx, AVTimecode *tc, IDeckLinkVideoInputFrame *videoFrame)
705 {
706  AVRational frame_rate = ctx->video_st->r_frame_rate;
707  int ret;
708  /* 50/60 fps content has alternating VITC1 and VITC2 timecode (see SMPTE ST
709  * 12-2, section 7), so the native ordering of RP188Any (HFR, VITC1, LTC,
710  * VITC2) would not work because LTC might not contain the field flag.
711  * Therefore we query the types manually. */
712  if (ctx->tc_format == bmdTimecodeRP188Any && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1) {
713 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
714  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
715  if (ret == AVERROR(ENOENT))
716 #endif
717  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC1, videoFrame);
718  if (ret == AVERROR(ENOENT))
719  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC2, videoFrame);
720  if (ret == AVERROR(ENOENT))
721  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188LTC, videoFrame);
722  } else {
723  ret = get_bmd_timecode(avctx, tc, frame_rate, ctx->tc_format, videoFrame);
724  }
725  return ret;
726 }
727 
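/* Per-frame capture callback: it timestamps the video and audio payloads
 * according to the selected PTS source, optionally draws bars or repeats the
 * last frame on signal loss, extracts timecode, teletext, closed captions
 * and KLV from the VANC area, and pushes the resulting packets onto the
 * internal queue read by ff_decklink_read_packet(). The video packet wraps
 * the driver's frame buffer without copying, holding a reference on the
 * IDeckLinkVideoInputFrame until the packet is freed. */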
728 HRESULT decklink_input_callback::VideoInputFrameArrived(
729  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
730 {
731  void *frameBytes;
732  void *audioFrameBytes;
733  BMDTimeValue frameTime;
734  BMDTimeValue frameDuration;
735  int64_t wallclock = 0, abs_wallclock = 0;
736  int64_t video_pkt_pts, audio_pkt_pts;
737  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
738 
739  if (ctx->autodetect) {
740  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
741  ctx->bmd_mode == bmdModeUnknown)
742  {
743  ctx->bmd_mode = AUTODETECT_DEFAULT_MODE;
744  }
745  return S_OK;
746  }
747 
748  // Drop the frames till system's timestamp aligns with the configured value.
749  if (0 == ctx->frameCount && cctx->timestamp_align) {
750  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
751  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
752  if (av_cmp_q(remainder, frame_duration) > 0) {
753  ++ctx->dropped;
754  return S_OK;
755  }
756  }
757 
758  ctx->frameCount++;
759  if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
760  wallclock = av_gettime_relative();
761  if (ctx->audio_pts_source == PTS_SRC_ABS_WALLCLOCK || ctx->video_pts_source == PTS_SRC_ABS_WALLCLOCK)
762  abs_wallclock = av_gettime();
763  video_pkt_pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
764  audio_pkt_pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
765 
766  // Handle Video Frame
767  if (videoFrame) {
768  AVPacket pkt = { 0 };
769  if (ctx->frameCount % 25 == 0) {
770  unsigned long long qsize = ff_decklink_packet_queue_size(&ctx->queue);
772  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
773  ctx->frameCount,
774  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
775  (double)qsize / 1024 / 1024);
776  }
777 
778  videoFrame->GetBytes(&frameBytes);
779  videoFrame->GetStreamTime(&frameTime, &frameDuration,
780  ctx->video_st->time_base.den);
781 
782  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
783  if (ctx->signal_loss_action == SIGNAL_LOSS_BARS && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
784  unsigned bars[8] = {
785  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
786  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
787  int width = videoFrame->GetWidth();
788  int height = videoFrame->GetHeight();
789  unsigned *p = (unsigned *)frameBytes;
790 
791  for (int y = 0; y < height; y++) {
792  for (int x = 0; x < width; x += 2)
793  *p++ = bars[(x * 8) / width];
794  }
795  } else if (ctx->signal_loss_action == SIGNAL_LOSS_REPEAT && last_video_frame) {
796  videoFrame = last_video_frame;
797  videoFrame->GetBytes(&frameBytes);
798  }
799 
800  if (!no_video) {
801  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
802  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
803  }
804  no_video = 1;
805  } else {
806  if (ctx->signal_loss_action == SIGNAL_LOSS_REPEAT) {
807  if (last_video_frame)
808  last_video_frame->Release();
809  last_video_frame = videoFrame;
810  last_video_frame->AddRef();
811  }
812  if (no_video) {
813  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
814  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
815  }
816  no_video = 0;
817 
818  // Handle Timecode (if requested)
819  if (ctx->tc_format) {
820  AVTimecode tcr;
821  if (get_frame_timecode(avctx, ctx, &tcr, videoFrame) >= 0) {
822  char tcstr[AV_TIMECODE_STR_SIZE];
823  const char *tc = av_timecode_make_string(&tcr, tcstr, 0);
824  if (tc) {
825  AVDictionary* metadata_dict = NULL;
826  uint8_t* packed_metadata;
827 
828  if (av_cmp_q(ctx->video_st->r_frame_rate, av_make_q(60, 1)) < 1) {
829  uint32_t tc_data = av_timecode_get_smpte_from_framenum(&tcr, 0);
830  int size = sizeof(uint32_t) * 4;
831  uint32_t *sd = (uint32_t *)av_packet_new_side_data(&pkt, AV_PKT_DATA_S12M_TIMECODE, size);
832 
833  if (sd) {
834  *sd = 1; // one TC
835  *(sd + 1) = tc_data; // TC
836  }
837  }
838 
839  if (av_dict_set(&metadata_dict, "timecode", tc, 0) >= 0) {
840  size_t metadata_len;
841  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
842  av_dict_free(&metadata_dict);
843  if (packed_metadata) {
844  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
845  av_freep(&packed_metadata);
846  else if (!ctx->tc_seen)
847  ctx->tc_seen = ctx->frameCount;
848  }
849  }
850  }
851  } else {
852  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
853  }
854  }
855  }
856 
857  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
858 
859  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
860  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
861  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
862  return S_OK;
863  }
864 
865  pkt.pts = video_pkt_pts;
866  pkt.dts = pkt.pts;
867 
868  pkt.duration = frameDuration;
869  //To be made sure it still applies
870  pkt.flags |= AV_PKT_FLAG_KEY;
871  pkt.stream_index = ctx->video_st->index;
872  pkt.data = (uint8_t *)frameBytes;
873  pkt.size = videoFrame->GetRowBytes() *
874  videoFrame->GetHeight();
875  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
876 
877  if (!no_video) {
878  IDeckLinkVideoFrameAncillary *vanc;
879  AVPacket txt_pkt = { 0 };
880  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
881  uint8_t *txt_buf = txt_buf0;
882 
883  if (ctx->enable_klv) {
884  handle_klv(avctx, ctx, videoFrame, pkt.pts);
885  }
886 
887  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
888  int i;
889  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
890  txt_buf[0] = 0x10; // data_identifier - EBU_data
891  txt_buf++;
892 #if CONFIG_LIBZVBI
893  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
894  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
895  int64_t line_mask = 1;
896  av_assert0(videoFrame->GetWidth() == 720);
897  for (i = 6; i < 336; i++, line_mask <<= 1) {
898  uint8_t *buf;
899  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
900  if (vanc_format == bmdFormat8BitYUV)
901  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
902  else
903  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
904  }
905  if (i == 22)
906  i = 317;
907  }
908  }
909 #endif
910  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
911  int idx = get_vanc_line_idx(ctx->bmd_mode);
912  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
913  uint8_t *buf;
914  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
915  uint16_t vanc[MAX_WIDTH_VANC];
916  size_t vanc_size = videoFrame->GetWidth();
917  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
918  vanc_size = vanc_size * 2;
919  unpack_v210(vanc, buf, videoFrame->GetWidth());
920  } else {
921  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
922  }
923  txt_buf = get_metadata(avctx, vanc, vanc_size,
924  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
925  }
926  if (i == vanc_line_numbers[idx].field0_vanc_end)
927  i = vanc_line_numbers[idx].field1_vanc_start - 1;
928  }
929  }
930  vanc->Release();
931  if (txt_buf - txt_buf0 > 1) {
932  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
933  while (stuffing_units--) {
934  memset(txt_buf, 0xff, 46);
935  txt_buf[1] = 0x2c; // data_unit_length
936  txt_buf += 46;
937  }
938  txt_pkt.pts = pkt.pts;
939  txt_pkt.dts = pkt.dts;
940  txt_pkt.stream_index = ctx->teletext_st->index;
941  txt_pkt.data = txt_buf0;
942  txt_pkt.size = txt_buf - txt_buf0;
943  if (ff_decklink_packet_queue_put(&ctx->queue, &txt_pkt) < 0) {
944  ++ctx->dropped;
945  }
946  }
947  }
948  }
949 
950  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
951  if (pkt.buf)
952  videoFrame->AddRef();
953 
954  if (ff_decklink_packet_queue_put(&ctx->queue, &pkt) < 0) {
955  ++ctx->dropped;
956  }
957  }
958 
959  // Handle Audio Frame
960  if (audioFrame) {
961  AVPacket pkt = { 0 };
962  BMDTimeValue audio_pts;
963 
964  //hack among hacks
965  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->ch_layout.nb_channels * (ctx->audio_depth / 8);
966  audioFrame->GetBytes(&audioFrameBytes);
967  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
968  pkt.pts = audio_pkt_pts;
969  pkt.dts = pkt.pts;
970 
971  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
972  pkt.flags |= AV_PKT_FLAG_KEY;
973  pkt.stream_index = ctx->audio_st->index;
974  pkt.data = (uint8_t *)audioFrameBytes;
975 
976  if (ff_decklink_packet_queue_put(&ctx->queue, &pkt) < 0) {
977  ++ctx->dropped;
978  }
979  }
980 
981  return S_OK;
982 }
983 
984 HRESULT decklink_input_callback::VideoInputFormatChanged(
985  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
986  BMDDetectedVideoInputFormatFlags formatFlags)
987 {
988  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
989  ctx->bmd_mode = mode->GetDisplayMode();
990  // check the C context member to make sure we set both raw_format and bmd_mode with data from the same format change callback
991  if (!cctx->raw_format)
992  ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
993  return S_OK;
994 }
995 
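/* Mode autodetection: input is enabled with a placeholder mode and the
 * format-detection flag, streams are started, and the code polls for up to
 * about 3 seconds until VideoInputFormatChanged() has stored a real mode in
 * ctx->bmd_mode, which is then written back to cctx->format_code as a
 * four-character code. */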
996 static int decklink_autodetect(struct decklink_cctx *cctx) {
997  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
998  DECKLINK_BOOL autodetect_supported = false;
999  int i;
1000 
1001  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
1002  return -1;
1003  if (autodetect_supported == false)
1004  return -1;
1005 
1006  ctx->autodetect = 1;
1007  ctx->bmd_mode = bmdModeUnknown;
1008  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
1009  bmdFormat8BitYUV,
1010  bmdVideoInputEnableFormatDetection) != S_OK) {
1011  return -1;
1012  }
1013 
1014  if (ctx->dli->StartStreams() != S_OK) {
1015  return -1;
1016  }
1017 
1018  // 3 second timeout
1019  for (i = 0; i < 30; i++) {
1020  av_usleep(100000);
1021  /* Sometimes VideoInputFrameArrived is called without the
1022  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
1023  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
1024  if (ctx->bmd_mode != bmdModeUnknown &&
1025  ctx->bmd_mode != AUTODETECT_DEFAULT_MODE)
1026  break;
1027  }
1028 
1029  ctx->dli->PauseStreams();
1030  ctx->dli->FlushStreams();
1031  ctx->autodetect = 0;
1032  if (ctx->bmd_mode != bmdModeUnknown) {
1033  cctx->format_code = (char *)av_mallocz(5);
1034  if (!cctx->format_code)
1035  return -1;
1036  AV_WB32(cctx->format_code, ctx->bmd_mode);
1037  return 0;
1038  } else {
1039  return -1;
1040  }
1041 
1042 }
1043 
1044 extern "C" {
1045 
1046 av_cold int ff_decklink_read_close(AVFormatContext *avctx)
1047 {
1048  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1049  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1050 
1051  if (ctx->dli) {
1052  ctx->dli->StopStreams();
1053  ctx->dli->DisableVideoInput();
1054  ctx->dli->DisableAudioInput();
1055  ctx->dli->SetCallback(nullptr);
1056  }
1057 
1058  ff_decklink_cleanup(avctx);
1059  ff_decklink_packet_queue_end(&ctx->queue);
1060 
1061  av_freep(&cctx->ctx);
1062 
1063  return 0;
1064 }
1065 
1066 av_cold int ff_decklink_read_header(AVFormatContext *avctx)
1067 {
1068  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1069  struct decklink_ctx *ctx;
1070  class decklink_allocator *allocator;
1071  class decklink_input_callback *input_callback;
1072  AVStream *st;
1073  HRESULT result;
1074  int ret;
1075 
1076  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1077  if (!ctx)
1078  return AVERROR(ENOMEM);
1079  ctx->list_devices = cctx->list_devices;
1080  ctx->list_formats = cctx->list_formats;
1081  ctx->enable_klv = cctx->enable_klv;
1082  ctx->teletext_lines = cctx->teletext_lines;
1083  ctx->preroll = cctx->preroll;
1084  ctx->duplex_mode = cctx->duplex_mode;
1085  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1086  ctx->tc_format = decklink_timecode_format_map[cctx->tc_format];
1087  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1088  ctx->video_input = decklink_video_connection_map[cctx->video_input];
1089  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1090  ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
1091  ctx->audio_pts_source = cctx->audio_pts_source;
1092  ctx->video_pts_source = cctx->video_pts_source;
1093  ctx->draw_bars = cctx->draw_bars;
1094  ctx->signal_loss_action = cctx->signal_loss_action;
1095  if (!ctx->draw_bars && ctx->signal_loss_action == SIGNAL_LOSS_BARS) {
1096  ctx->signal_loss_action = SIGNAL_LOSS_NONE;
1097  av_log(avctx, AV_LOG_WARNING, "Setting signal_loss_action to none because draw_bars is false\n");
1098  }
1099  if (!ctx->draw_bars && ctx->signal_loss_action != SIGNAL_LOSS_NONE) {
1100  av_log(avctx, AV_LOG_ERROR, "options draw_bars and signal_loss_action are mutually exclusive\n");
1101  av_freep(&ctx);
1102  return AVERROR(EINVAL);
1103  }
1104  ctx->audio_depth = cctx->audio_depth;
1105  if (cctx->raw_format > 0 && (unsigned int)cctx->raw_format < FF_ARRAY_ELEMS(decklink_raw_format_map))
1106  ctx->raw_format = decklink_raw_format_map[cctx->raw_format];
1107  cctx->ctx = ctx;
1108 
1109  /* Check audio channel option for valid values: 2, 8 or 16 */
1110  switch (cctx->audio_channels) {
1111  case 2:
1112  case 8:
1113  case 16:
1114  break;
1115  default:
1116  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1117  ret = AVERROR(EINVAL);
1118  goto error;
1119  }
1120 
1121  /* Check audio bit depth option for valid values: 16 or 32 */
1122  switch (cctx->audio_depth) {
1123  case 16:
1124  case 32:
1125  break;
1126  default:
1127  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1128  ret = AVERROR(EINVAL);
1129  goto error;
1130  }
1131 
1132  /* List available devices. */
1133  if (ctx->list_devices) {
1134  ff_decklink_list_devices_legacy(avctx, 1, 0);
1135  ret = AVERROR_EXIT;
1136  goto error;
1137  }
1138 
1139  ret = ff_decklink_init_device(avctx, avctx->url);
1140  if (ret < 0)
1141  goto error;
1142 
1143  /* Get input device. */
1144  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1145  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1146  avctx->url);
1147  ret = AVERROR(EIO);
1148  goto error;
1149  }
1150 
1151  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1152  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1153  ret = AVERROR(EIO);
1154  goto error;
1155  }
1156 
1157  /* List supported formats. */
1158  if (ctx->list_formats) {
1159  ff_decklink_list_formats(avctx, DIRECTION_IN);
1160  ret = AVERROR_EXIT;
1161  goto error;
1162  }
1163 
1164  input_callback = new decklink_input_callback(avctx);
1165  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1166  input_callback->Release();
1167  if (ret < 0) {
1168  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1169  goto error;
1170  }
1171 
1172  allocator = new decklink_allocator();
1173  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1174  allocator->Release();
1175  if (ret < 0) {
1176  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1177  goto error;
1178  }
1179 
1180  if (!cctx->format_code) {
1181  if (decklink_autodetect(cctx) < 0) {
1182  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1183  ret = AVERROR(EIO);
1184  goto error;
1185  }
1186  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1187  }
1188  if (ctx->raw_format == (BMDPixelFormat)0)
1189  ctx->raw_format = bmdFormat8BitYUV;
1190  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1191  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1192  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1193  ret = AVERROR(EIO);
1194  goto error;
1195  }
1196 
1197 #if !CONFIG_LIBZVBI
1198  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1199  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1200  ret = AVERROR(ENOSYS);
1201  goto error;
1202  }
1203 #endif
1204 
1205  /* Setup streams. */
1206  st = avformat_new_stream(avctx, NULL);
1207  if (!st) {
1208  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1209  ret = AVERROR(ENOMEM);
1210  goto error;
1211  }
1212  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1213  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1214  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1215  st->codecpar->ch_layout.nb_channels = cctx->audio_channels;
1216  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1217  ctx->audio_st=st;
1218 
1219  st = avformat_new_stream(avctx, NULL);
1220  if (!st) {
1221  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1222  ret = AVERROR(ENOMEM);
1223  goto error;
1224  }
1225  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1226  st->codecpar->width = ctx->bmd_width;
1227  st->codecpar->height = ctx->bmd_height;
1228 
1229  st->time_base.den = ctx->bmd_tb_den;
1230  st->time_base.num = ctx->bmd_tb_num;
1231  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1232 
1233  switch(ctx->raw_format) {
1234  case bmdFormat8BitYUV:
1235  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1236  st->codecpar->format = AV_PIX_FMT_UYVY422;
1237  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1238  break;
1239  case bmdFormat10BitYUV:
1240  st->codecpar->codec_id = AV_CODEC_ID_V210;
1241  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1242  break;
1243  case bmdFormat8BitARGB:
1244  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1245  st->codecpar->format = AV_PIX_FMT_0RGB;
1246  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1247  break;
1248  case bmdFormat8BitBGRA:
1249  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1250  st->codecpar->format = AV_PIX_FMT_BGR0;
1251  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1252  break;
1253  case bmdFormat10BitRGB:
1254  st->codecpar->codec_id = AV_CODEC_ID_R210;
1255  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1256  break;
1257  default:
1258  char fourcc_str[AV_FOURCC_MAX_STRING_SIZE] = {0};
1259  av_fourcc_make_string(fourcc_str, ctx->raw_format);
1260  av_log(avctx, AV_LOG_ERROR, "Raw Format %s not supported\n", fourcc_str);
1261  ret = AVERROR(EINVAL);
1262  goto error;
1263  }
1264 
1265  switch (ctx->bmd_field_dominance) {
1266  case bmdUpperFieldFirst:
1267  st->codecpar->field_order = AV_FIELD_TT;
1268  break;
1269  case bmdLowerFieldFirst:
1270  st->codecpar->field_order = AV_FIELD_BB;
1271  break;
1272  case bmdProgressiveFrame:
1273  case bmdProgressiveSegmentedFrame:
1274  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1275  break;
1276  }
1277 
1278  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1279 
1280  ctx->video_st=st;
1281 
1282  if (ctx->enable_klv) {
1283  st = avformat_new_stream(avctx, NULL);
1284  if (!st) {
1285  ret = AVERROR(ENOMEM);
1286  goto error;
1287  }
1288  st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
1289  st->time_base.den = ctx->bmd_tb_den;
1290  st->time_base.num = ctx->bmd_tb_num;
1291  st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
1292  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1293  ctx->klv_st = st;
1294  }
1295 
1296  if (ctx->teletext_lines) {
1297  st = avformat_new_stream(avctx, NULL);
1298  if (!st) {
1299  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1300  ret = AVERROR(ENOMEM);
1301  goto error;
1302  }
1303  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1304  st->time_base.den = ctx->bmd_tb_den;
1305  st->time_base.num = ctx->bmd_tb_num;
1306  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1307  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1308  ctx->teletext_st = st;
1309  }
1310 
1311  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->ch_layout.nb_channels);
1312  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->ch_layout.nb_channels);
1313 
1314  if (result != S_OK) {
1315  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1316  ret = AVERROR(EIO);
1317  goto error;
1318  }
1319 
1320  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1321  ctx->raw_format,
1322  bmdVideoInputFlagDefault);
1323 
1324  if (result != S_OK) {
1325  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1326  ret = AVERROR(EIO);
1327  goto error;
1328  }
1329 
1330  ff_decklink_packet_queue_init(avctx, &ctx->queue, cctx->queue_size);
1331 
1332  if (ctx->dli->StartStreams() != S_OK) {
1333  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1334  ret = AVERROR(EIO);
1335  goto error;
1336  }
1337 
1338  return 0;
1339 
1340 error:
1341  ff_decklink_cleanup(avctx);
1342  av_freep(&cctx->ctx);
1343  return ret;
1344 }
1345 
1346 int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
1347 {
1348  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1349  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1350 
1351  ff_decklink_packet_queue_get(&ctx->queue, pkt, 1);
1352 
1353  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
1354  size_t size;
1355  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
1356  if (side_metadata) {
1357  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1358  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1359  }
1360  }
1361 
1362  return 0;
1363 }
1364 
1365 int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
1366 {
1367  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1368 }
1369 
1370 } /* extern "C" */
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
AV_CODEC_ID_PCM_S16LE
@ AV_CODEC_ID_PCM_S16LE
Definition: codec_id.h:334
VANCLineNumber::vanc_end
int vanc_end
Definition: decklink_dec.cpp:67
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
avformat_new_stream
AVStream * avformat_new_stream(AVFormatContext *s, const struct AVCodec *c)
Add a new stream to a media file.
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:202
int64_t
long long int64_t
Definition: coverity.c:34
AV_CODEC_ID_RAWVIDEO
@ AV_CODEC_ID_RAWVIDEO
Definition: codec_id.h:65
mask
int mask
Definition: mediacodecdec_common.c:154
AVPacket::data
uint8_t * data
Definition: packet.h:539
AV_CODEC_ID_DVB_TELETEXT
@ AV_CODEC_ID_DVB_TELETEXT
Definition: codec_id.h:563
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
data
const char data[16]
Definition: mxf.c:149
AV_PKT_DATA_S12M_TIMECODE
@ AV_PKT_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:288
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:557
reverse.h
mathematics.h
AVDictionary
Definition: dict.c:34
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:594
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
AV_FIELD_TT
@ AV_FIELD_TT
Top coded_first, top displayed first.
Definition: defs.h:203
avpriv_set_pts_info
void avpriv_set_pts_info(AVStream *st, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: avformat.c:867
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
AV_FOURCC_MAX_STRING_SIZE
#define AV_FOURCC_MAX_STRING_SIZE
Definition: avutil.h:346
timecode.h
AV_CODEC_ID_R210
@ AV_CODEC_ID_R210
Definition: codec_id.h:185
pts
static int64_t pts
Definition: transcode_aac.c:644
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVRational::num
int num
Numerator.
Definition: rational.h:59
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1535
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
intreadwrite.h
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_parity
#define av_parity
Definition: common.h:160
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
if
if(ret)
Definition: filter_design.txt:179
AVFormatContext
Format I/O context.
Definition: avformat.h:1300
internal.h
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:522
result
and forward the result(frame or status change) to the corresponding input. If nothing is possible
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:787
NULL
#define NULL
Definition: coverity.c:32
VANCLineNumber::field1_vanc_start
int field1_vanc_start
Definition: decklink_dec.cpp:66
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
VANCLineNumber::field0_vanc_end
int field0_vanc_end
Definition: decklink_dec.cpp:65
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:265
time.h
AV_CODEC_ID_SMPTE_KLV
@ AV_CODEC_ID_SMPTE_KLV
Definition: codec_id.h:593
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
av_timecode_init_from_components
int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx)
Init a timecode struct from the passed timecode components.
Definition: timecode.c:232
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:85
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1416
size
int size
Definition: twinvq_data.h:10344
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
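Illustrative usage of av_make_q (a minimal sketch, not code from decklink_dec.cpp; the helper name make_micro_time_base is hypothetical):

    #include <libavutil/rational.h>

    /* Construct a 1/1000000 (microsecond) time base as an AVRational. */
    static AVRational make_micro_time_base(void)
    {
        return av_make_q(1, 1000000);
    }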
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
av_fourcc_make_string
char * av_fourcc_make_string(char *buf, uint32_t fourcc)
Fill the provided buffer with a string containing a FourCC (four-character code) representation.
Definition: utils.c:75
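Illustrative usage of av_fourcc_make_string (a minimal sketch; the helper name log_fourcc and its arguments are hypothetical):

    #include <stdint.h>
    #include <libavutil/avutil.h> /* AV_FOURCC_MAX_STRING_SIZE */
    #include <libavutil/log.h>

    /* Log any 32-bit four-character code in human-readable form. */
    static void log_fourcc(void *log_ctx, uint32_t fourcc)
    {
        char buf[AV_FOURCC_MAX_STRING_SIZE];
        av_log(log_ctx, AV_LOG_INFO, "format: %s\n",
               av_fourcc_make_string(buf, fourcc));
    }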
AV_CODEC_ID_V210
@ AV_CODEC_ID_V210
Definition: codec_id.h:179
avdevice.h
av_packet_unpack_dictionary
int av_packet_unpack_dictionary(const uint8_t *data, size_t size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: packet.c:349
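Illustrative usage of av_packet_unpack_dictionary together with av_packet_get_side_data (a sketch only; packet_metadata_to_dict is a hypothetical helper):

    #include <libavcodec/packet.h>
    #include <libavutil/dict.h>

    /* Recover key/value metadata packed into AV_PKT_DATA_STRINGS_METADATA
     * side data. Returns 0 or a negative AVERROR code; the caller owns
     * *dict and must release it with av_dict_free(). */
    static int packet_metadata_to_dict(const AVPacket *pkt, AVDictionary **dict)
    {
        size_t size;
        const uint8_t *data =
            av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
        if (!data)
            return 0; /* no metadata attached */
        return av_packet_unpack_dictionary(data, size, dict);
    }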
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
VANCLineNumber::mode
BMDDisplayMode mode
Definition: decklink_dec.cpp:63
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
line
Definition: graph2dot.c:48
av_packet_pack_dictionary
uint8_t * av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
Pack a dictionary for use in side_data.
Definition: packet.c:314
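Illustrative counterpart for av_packet_pack_dictionary (a sketch, not the file's actual mechanism; attach_metadata is a hypothetical helper, and av_packet_add_side_data is used here to hand the packed buffer to the packet):

    #include <libavcodec/packet.h>
    #include <libavutil/dict.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    /* Serialize an AVDictionary and attach it to a packet as
     * AV_PKT_DATA_STRINGS_METADATA. On success the packet owns the buffer. */
    static int attach_metadata(AVPacket *pkt, AVDictionary *metadata)
    {
        size_t size;
        int ret;
        uint8_t *data = av_packet_pack_dictionary(metadata, &size);
        if (!data)
            return AVERROR(ENOMEM);
        ret = av_packet_add_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, data, size);
        if (ret < 0)
            av_free(data); /* packet did not take ownership */
        return ret;
    }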
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:545
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
AV_PKT_DATA_STRINGS_METADATA
@ AV_PKT_DATA_STRINGS_METADATA
A list of zero terminated key/value strings.
Definition: packet.h:169
av_timecode_get_smpte_from_framenum
uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum)
Convert frame number to SMPTE 12M binary representation.
Definition: timecode.c:53
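Illustrative usage of av_timecode_get_smpte_from_framenum, assuming a 30000/1001 drop-frame source (a sketch; smpte_from_framenum is a hypothetical helper, and AV_TIMECODE_FLAG_DROPFRAME is listed further below):

    #include <stdint.h>
    #include <libavutil/rational.h>
    #include <libavutil/timecode.h>

    /* Convert a running frame count to the packed SMPTE 12M (BCD) form. */
    static int smpte_from_framenum(int framenum, uint32_t *out, void *log_ctx)
    {
        AVTimecode tc;
        int ret = av_timecode_init(&tc, av_make_q(30000, 1001),
                                   AV_TIMECODE_FLAG_DROPFRAME, 0, log_ctx);
        if (ret < 0)
            return ret;
        *out = av_timecode_get_smpte_from_framenum(&tc, framenum);
        return 0;
    }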
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:532
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: packet.c:252
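Illustrative usage of av_packet_get_side_data (a sketch; cc_side_data_size is a hypothetical helper):

    #include <libavcodec/packet.h>

    /* Report how many bytes of ATSC A53 closed-caption side data, if any,
     * a packet carries. */
    static size_t cc_side_data_size(const AVPacket *pkt)
    {
        size_t size = 0;
        if (!av_packet_get_side_data(pkt, AV_PKT_DATA_A53_CC, &size))
            return 0;
        return size;
    }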
VANCLineNumber
Definition: decklink_dec.cpp:62
internal.h
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:204
common.h
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
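Illustrative usage of av_mallocz (a sketch; alloc_padded is a hypothetical helper that pairs with av_freep, and AV_INPUT_BUFFER_PADDING_SIZE is listed above):

    #include <stdint.h>
    #include <libavutil/mem.h>
    #include <libavcodec/defs.h> /* AV_INPUT_BUFFER_PADDING_SIZE */

    /* Allocate a zero-initialized buffer with decoder padding appended.
     * Returns NULL on failure; release with av_freep(&buf). */
    static uint8_t *alloc_padded(size_t payload_size)
    {
        return av_mallocz(payload_size + AV_INPUT_BUFFER_PADDING_SIZE);
    }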
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
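Illustrative usage of av_rescale, which computes a*b/c with rounding to nearest (a sketch; usec_to_90khz is a hypothetical helper):

    #include <stdint.h>
    #include <libavutil/avutil.h>      /* AV_TIME_BASE */
    #include <libavutil/mathematics.h> /* av_rescale */

    /* Convert a duration in microseconds (AV_TIME_BASE units) into 90 kHz
     * ticks: result = usec * 90000 / AV_TIME_BASE. */
    static int64_t usec_to_90khz(int64_t usec)
    {
        return av_rescale(usec, 90000, AV_TIME_BASE);
    }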
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
AV_TIMECODE_FLAG_DROPFRAME
@ AV_TIMECODE_FLAG_DROPFRAME
timecode is drop frame
Definition: timecode.h:36
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
KLVPacket
Definition: mxf.h:74
AVDeviceInfoList
List of devices.
Definition: avdevice.h:343
avformat.h
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:754
input_callback
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
Definition: mmaldec.c:209
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new side data for a packet.
Definition: packet.c:231
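Illustrative usage of av_packet_new_side_data (a sketch; attach_cc is a hypothetical helper):

    #include <string.h>
    #include <libavcodec/packet.h>
    #include <libavutil/error.h>

    /* Attach a buffer of raw A53 closed-caption bytes to a packet; the
     * returned side-data buffer is owned by the packet. */
    static int attach_cc(AVPacket *pkt, const uint8_t *cc, size_t cc_len)
    {
        uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_A53_CC, cc_len);
        if (!side)
            return AVERROR(ENOMEM);
        memcpy(side, cc, cc_len);
        return 0;
    }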
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:914
AVPacket::stream_index
int stream_index
Definition: packet.h:541
av_gettime
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
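Illustrative usage of av_gettime (a sketch; wallclock_pts is a hypothetical helper, and av_rescale_q / AV_TIME_BASE_Q are used here to move into an arbitrary time base):

    #include <stdint.h>
    #include <libavutil/avutil.h>      /* AV_TIME_BASE_Q */
    #include <libavutil/mathematics.h> /* av_rescale_q */
    #include <libavutil/time.h>        /* av_gettime */

    /* Stamp "now" (wallclock, in microseconds) into a stream time base. */
    static int64_t wallclock_pts(AVRational time_base)
    {
        return av_rescale_q(av_gettime(), AV_TIME_BASE_Q, time_base);
    }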
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_CODEC_ID_PCM_S32LE
@ AV_CODEC_ID_PCM_S32LE
Definition: codec_id.h:342
avutil.h
AV_PKT_DATA_A53_CC
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:239
packet_internal.h
it
Copy libavfilter/vf_edgedetect.c to libavfilter/vf_foobar.c, substituting EdgeDetect/edgedetect with Foobar/foobar (e.g. sed 's/EdgeDetect/Foobar/g'); edit libavfilter/Makefile and add an entry for foobar following the pattern of the other filters; edit libavfilter/allfilters.c and add an entry for foobar following the pattern of the other filters; then configure, make -j<whatever> ffmpeg, and run ffmpeg -i ... — you should get a foobar png with Lena edge-detected. That's it.
Definition: writing_filters.txt:31
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVPacket
This structure stores compressed data.
Definition: packet.h:516
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
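Illustrative usage of av_dict_set (a sketch; make_metadata and the keys "timecode"/"source" are hypothetical):

    #include <libavutil/dict.h>

    /* Collect key/value strings into an AVDictionary. The dictionary is
     * created on the first av_dict_set() call and must later be released
     * with av_dict_free(&dict). */
    static int make_metadata(AVDictionary **dict, const char *timecode)
    {
        int ret = av_dict_set(dict, "timecode", timecode, 0);
        if (ret < 0)
            return ret;
        return av_dict_set(dict, "source", "decklink", 0);
    }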
VANCLineNumber::vanc_start
int vanc_start
Definition: decklink_dec.cpp:64
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:262
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
width
#define width
Definition: dsp.h:85
AVTimecode
Definition: timecode.h:41
AVFormatContext::priv_data
void * priv_data
Format private data.
Definition: avformat.h:1328
src
#define src
Definition: vp8dsp.c:248
line
From doc/swscale.txt (the official guide to swscale for confused developers): slices are consecutive non-overlapping rectangles; unscaled special converters handle common formats; for each output line the vertical scaler pulls lines from a ring buffer when the ring buffer does not already contain the wanted line.
Definition: swscale.txt:40
av_timecode_make_string
char * av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum_arg)
Load timecode string in buf.
Definition: timecode.c:103
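Illustrative usage of av_timecode_make_string, assuming a 25 fps stream (a sketch; format_timecode is a hypothetical helper, and the buffer size constant AV_TIMECODE_STR_SIZE comes from libavutil/timecode.h):

    #include <libavutil/rational.h>
    #include <libavutil/timecode.h>

    /* Render frame number 'framenum' as "HH:MM:SS:FF"; buf must hold at
     * least AV_TIMECODE_STR_SIZE bytes. Returns NULL on init failure. */
    static const char *format_timecode(char buf[AV_TIMECODE_STR_SIZE],
                                       int framenum, void *log_ctx)
    {
        AVTimecode tc;
        if (av_timecode_init(&tc, av_make_q(25, 1), 0, 0, log_ctx) < 0)
            return NULL;
        return av_timecode_make_string(&tc, buf, framenum);
    }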