#include <DeckLinkAPI.h>

#define MAX_WIDTH_VANC 1920
    {bmdModeNTSC,         11, 19, 274, 282},
    {bmdModeNTSC2398,     11, 19, 274, 282},
    {bmdModePAL,           7, 22, 320, 335},
    {bmdModeNTSCp,        11, -1,  -1,  39},
    {bmdModePALp,          7, -1,  -1,  45},

    {bmdModeHD1080p2398,   8, -1,  -1,  42},
    {bmdModeHD1080p24,     8, -1,  -1,  42},
    {bmdModeHD1080p25,     8, -1,  -1,  42},
    {bmdModeHD1080p2997,   8, -1,  -1,  42},
    {bmdModeHD1080p30,     8, -1,  -1,  42},
    {bmdModeHD1080i50,     8, 20, 570, 585},
    {bmdModeHD1080i5994,   8, 20, 570, 585},
    {bmdModeHD1080i6000,   8, 20, 570, 585},
    {bmdModeHD1080p50,     8, -1,  -1,  42},
    {bmdModeHD1080p5994,   8, -1,  -1,  42},
    {bmdModeHD1080p6000,   8, -1,  -1,  42},

    {bmdModeHD720p50,      8, -1,  -1,  26},
    {bmdModeHD720p5994,    8, -1,  -1,  26},
    {bmdModeHD720p60,      8, -1,  -1,  26},

    {bmdModeUnknown,       0, -1,  -1,  -1}
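/* Illustrative sketch, not part of the original listing: looking up an entry
 * of the vanc_line_numbers table above by display mode.  It assumes the first
 * struct member is named "mode" and that the table is terminated by the
 * bmdModeUnknown entry, whose index is returned as the fallback. */
static int lookup_vanc_line_idx_sketch(BMDDisplayMode mode)
{
    int i;
    for (i = 0; vanc_line_numbers[i].mode != bmdModeUnknown; i++) {
        if (vanc_line_numbers[i].mode == mode)
            break;
    }
    return i;   /* index of the match, or of the bmdModeUnknown sentinel */
}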
virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void **allocatedBuffer)
{
    void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return E_OUTOFMEMORY;
    *allocatedBuffer = buf;
    return S_OK;
}
virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }

virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
IUnknown *obj = (class IUnknown *)opaque;
uint16_t vanc_sum = 0;
for (i = 3; i < len - 1; i++) {
    uint16_t v = buf[i];
    int np = v >> 8;
    int p = parity(v & 0xff);
    if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
        /* parity check failed */
        return -1;
    }
    vanc_sum += v;
    vanc_sum &= 0x1ff;
}
vanc_sum |= ((~vanc_sum & 0x100) << 1);
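/* Illustrative sketch, not from the original file: how a 10-bit VANC user
 * data word is formed from an 8-bit payload byte.  This is the property the
 * parity test above checks: bit 8 makes bits 0-8 even parity and bit 9 is the
 * complement of bit 8, so the top two bits (v >> 8) are always 1 or 2.
 * av_popcount() is assumed from libavutil. */
static uint16_t vanc_word_from_byte_sketch(uint8_t b)
{
    uint16_t v = b;
    int odd = av_popcount(b) & 1;   /* 1 if the payload byte has odd parity */
    v |= odd << 8;                  /* bit 8: even-parity bit over bits 0-8 */
    v |= (!odd) << 9;               /* bit 9: complement of bit 8 */
    return v;
}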
/* luma sits at bits 10-19 of the first v210 word and bits 0-9 and 20-29 of the second */
*dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
*dst++ = src[4] + ((src[5] & 3) << 8);
*dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
for (i = 0; i < width * 2 / 3; i++) {
    *dst++ = src[0] + ((src[1] & 3) << 8);
    *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
    *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
    src += 4;
}
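/* Illustrative sketch, not from the original file: the inverse of the
 * unpacking loop above.  One little-endian 32-bit v210 word carries three
 * 10-bit samples at bit offsets 0, 10 and 20; the top two bits stay zero. */
static void pack_v210_word_sketch(uint8_t *dst, uint16_t s0, uint16_t s1, uint16_t s2)
{
    uint32_t w = (s0 & 0x3ff) | ((uint32_t)(s1 & 0x3ff) << 10) | ((uint32_t)(s2 & 0x3ff) << 20);
    dst[0] =  w        & 0xff;
    dst[1] = (w >>  8) & 0xff;
    dst[2] = (w >> 16) & 0xff;
    dst[3] = (w >> 24) & 0xff;
}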
vbi_bit_slicer slicer;

vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff,
                    18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);

if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
*py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
*py++ = (src[4] >> 2) + ((src[5] &  3) << 6);
*py++ = (src[6] >> 6) + ((src[7] & 63) << 2);

return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)

for (i = 0; i < 42; i++)
if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) {
    uint16_t *descriptors = py + 4;

    for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
        int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
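        /* Bits 0-4 of each OP-47 line descriptor carry the line number within
           the field and bit 7 selects the field; when it is clear the line is
           mapped into the second field by the +313 offset above. */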
uint16_t did  = py[0];          // data identifier
uint16_t sdid = py[1];          // secondary data identifier
uint16_t dc   = py[2] & 255;    // data count

if (did == 0x143 && sdid == 0x102) {            // OP-47 subtitle distribution packet

} else if (allow_multipacket && did == 0x143 && sdid == 0x203) {    // OP-47 multipacket

    while (py < pend - 3) {

        py += 4 + (py[2] & 255);
size_t i, len = (buf[5] & 0xff) + 6 + 1;

uint16_t *cdp = &buf[6];        // the CDP follows the 6-word ANC header
if (cdp[0] != 0x96 || cdp[1] != 0x69) {         // cdp_identifier

for (i = 0; i < len - 1; i++)
    cdp_sum += cdp[i];
cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
if (cdp[len - 1] != cdp_sum) {

if (!(rate & 0x0f)) {

if (!(cdp[4] & 0x43)) {

hdr = (cdp[5] << 8) | cdp[6];
if (cdp[7] != 0x72) {           // ccdata_id

if (!(cc_count & 0xe0)) {

if ((len - 13) < cc_count * 3) {

if (cdp[len - 4] != 0x74) {     // cdp_footer id

ftr = (cdp[len - 3] << 8) | cdp[len - 2];

for (size_t i = 0; i < cc_count; i++) {
    cc[3 * i + 0] = cdp[9 + 3 * i + 0];
    cc[3 * i + 1] = cdp[9 + 3 * i + 1];
    cc[3 * i + 2] = cdp[9 + 3 * i + 2];
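/* Illustrative sketch, not from the original file: the CDP checksum rule the
 * verification above applies - the last byte is chosen so that the 8-bit sum
 * over the whole CDP, checksum byte included, comes out to zero. */
static uint8_t cdp_checksum_sketch(const uint8_t *cdp, size_t len_without_checksum)
{
    uint8_t sum = 0;
    for (size_t i = 0; i < len_without_checksum; i++)
        sum += cdp[i];
    return sum ? (uint8_t)(256 - sum) : 0;
}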
uint16_t *max_buf = buf + width;

while (buf < max_buf - 6) {
    uint16_t did  = buf[3] & 0xFF;      // data identifier
    uint16_t sdid = buf[4] & 0xFF;      // secondary data identifier

    if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {

    len = (buf[5] & 0xff) + 6 + 1;
    if (len > max_buf - buf) {

    if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
        width == 1920 && tgt_size >= 1920) {

    } else if (did == 0x61 && sdid == 0x01) {
        unsigned int data_len;
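    /* A 10-bit VANC packet is the 000/3FF/3FF ancillary data flags, then DID,
       SDID and data count (6 words total), the user data words and a final
       checksum word, which is why len above is (data count) + 6 + 1. */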
unsigned long long size;
const uint8_t KLV_IN_VANC_SDID = 0x04;

struct KLVPacket
{
    uint16_t sequence_counter;
    std::vector<uint8_t> data;
};

size_t total_size = 0;
std::vector<std::vector<KLVPacket>> klv_packets(256);

IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
    return;

IDeckLinkAncillaryPacketIterator *it = nullptr;
if (packets->GetPacketIterator(&it) != S_OK) {

IDeckLinkAncillaryPacket *packet = nullptr;
while (it->Next(&packet) == S_OK) {

    if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
        av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());

        if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**)&data, &size) == S_OK) {

            uint16_t psc = data[1] << 8 | data[2];      // packet sequence counter

            auto& list = klv_packets[mid];
            uint16_t expected_psc = list.size() + 1;

            if (psc == expected_psc) {
                uint32_t data_len = size - 3;           // strip the MID/PSC header
                total_size += data_len;

                KLVPacket packet{ psc };
                packet.data.resize(data_len);
                memcpy(packet.data.data(), data + 3, data_len);

                list.push_back(std::move(packet));
                /* out-of-order PSC: discard what was collected for this MID */
                for (auto& klv : list)
                    total_size -= klv.data.size();
if (total_size > 0) {
    std::vector<uint8_t> klv;
    klv.reserve(total_size);

    for (size_t i = 0; i < klv_packets.size(); ++i) {
        auto& list = klv_packets[i];

        for (auto& packet : list)
            klv.insert(klv.end(), packet.data.begin(), packet.data.end());
    klv_packet.data = klv.data();
    klv_packet.size = klv.size();
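// Illustrative sketch, not part of the original file: the sending-side inverse
// of the reassembly above, based only on the layout the parser expects - a
// one-byte message ID, a two-byte big-endian packet sequence counter starting
// at 1, then the payload chunk.  chunk_payload is a hypothetical size limit.
static std::vector<std::vector<uint8_t>> split_klv_sketch(const std::vector<uint8_t>& klv,
                                                          uint8_t mid, size_t chunk_payload)
{
    std::vector<std::vector<uint8_t>> out;
    uint16_t psc = 1;
    for (size_t pos = 0; pos < klv.size(); pos += chunk_payload, ++psc) {
        size_t n = klv.size() - pos;
        if (n > chunk_payload)
            n = chunk_payload;
        std::vector<uint8_t> chunk;
        chunk.reserve(3 + n);
        chunk.push_back(mid);                       // MID
        chunk.push_back((uint8_t)(psc >> 8));       // PSC, high byte first
        chunk.push_back((uint8_t)(psc & 0xff));
        chunk.insert(chunk.end(), klv.begin() + pos, klv.begin() + pos + n);
        out.push_back(chunk);
    }
    return out;
}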
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
virtual ULONG   STDMETHODCALLTYPE AddRef(void);
virtual ULONG   STDMETHODCALLTYPE Release(void);

virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
                           IDeckLinkAudioInputPacket *audioFrame,
                           int64_t wallclock,
                           int64_t abs_wallclock,
                           DecklinkPtsSource pts_src,
                           AVRational time_base, int64_t *initial_pts,
                           int copyts)
{
    BMDTimeValue bmd_pts;
    BMDTimeValue bmd_duration;
    HRESULT res = E_INVALIDARG;

    /* one of the following runs, depending on the selected pts source: */
    res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);

    res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);

    res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);

    pts = bmd_pts / time_base.num;
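    /* The DeckLink time queries above are made with time_base.den as the
       timescale, so bmd_pts is in units of 1/den seconds; dividing by
       time_base.num then lands the result in stream time_base units. */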
IDeckLinkTimecode *timecode;

#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);

if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {

    if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {

        ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);

#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
HRESULT decklink_input_callback::VideoInputFrameArrived(
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)

void *audioFrameBytes;
BMDTimeValue frameTime;
BMDTimeValue frameDuration;
int64_t wallclock = 0, abs_wallclock = 0;
if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&

if (av_cmp_q(remainder, frame_duration) > 0) {
        "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
        videoFrame->GetRowBytes() * videoFrame->GetHeight(),
        (double)qsize / 1024 / 1024);

videoFrame->GetBytes(&frameBytes);
videoFrame->GetStreamTime(&frameTime, &frameDuration,
if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
    if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
        unsigned bars[8] = {
            0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
            0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
        int width  = videoFrame->GetWidth();
        int height = videoFrame->GetHeight();
        unsigned *p = (unsigned *)frameBytes;

        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x += 2)
                *p++ = bars[(x * 8) / width];
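        /* Each 32-bit bars[] entry holds two 8-bit UYVY pixels (byte order
           U Y V Y on a little-endian host), so x advances by two and one word
           is written per pixel pair. */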
int size = sizeof(uint32_t) * 4;

if (packed_metadata) {

pkt.size = videoFrame->GetRowBytes() *
           videoFrame->GetHeight();
IDeckLinkVideoFrameAncillary *vanc;

if (videoFrame->GetAncillaryData(&vanc) == S_OK) {

    BMDPixelFormat vanc_format = vanc->GetPixelFormat();

        (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
        int64_t line_mask = 1;

        for (i = 6; i < 336; i++, line_mask <<= 1) {

            if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
                if (vanc_format == bmdFormat8BitYUV)
                    txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
                else
                    txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {

    if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {

        size_t vanc_size = videoFrame->GetWidth();

            vanc_size = vanc_size * 2;

                               txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
if (txt_buf - txt_buf0 > 1) {
    int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
    while (stuffing_units--) {
        memset(txt_buf, 0xff, 46);

    txt_pkt.data = txt_buf0;
    txt_pkt.size = txt_buf - txt_buf0;
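    /* The stuffing pads the payload to a whole multiple of four 46-byte data
       units, presumably so that together with the fixed 45-byte DVB teletext
       PES header (the 45 in the formula above) the packet fills complete
       184-byte TS payloads (4 x 46 = 184). */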
videoFrame->AddRef();

BMDTimeValue audio_pts;

audioFrame->GetBytes(&audioFrameBytes);
HRESULT decklink_input_callback::VideoInputFormatChanged(
    BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
    BMDDetectedVideoInputFormatFlags formatFlags)

ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
/* decklink_autodetect() */
if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)

if (autodetect_supported == false)

ctx->autodetect = 1;
ctx->bmd_mode   = bmdModeUnknown;

                               bmdVideoInputEnableFormatDetection) != S_OK) {

if (ctx->dli->StartStreams() != S_OK) {

for (i = 0; i < 30; i++) {

    if (ctx->bmd_mode != bmdModeUnknown &&

ctx->dli->PauseStreams();
ctx->dli->FlushStreams();
ctx->autodetect = 0;
if (ctx->bmd_mode != bmdModeUnknown) {

/* ff_decklink_read_close() */
ctx->dli->StopStreams();
ctx->dli->DisableVideoInput();
ctx->dli->DisableAudioInput();
av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
if (ctx->list_devices) {

if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **)&ctx->dli) != S_OK) {

if (ctx->list_formats) {

ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
allocator->Release();
if (ctx->raw_format == (BMDPixelFormat)0)
    ctx->raw_format = bmdFormat8BitYUV;

if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
    av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");

st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
st->codecpar->width  = ctx->bmd_width;
st->codecpar->height = ctx->bmd_height;

st->time_base.den = ctx->bmd_tb_den;
st->time_base.num = ctx->bmd_tb_num;
st->r_frame_rate  = av_make_q(st->time_base.den, st->time_base.num);

switch (ctx->raw_format) {
case bmdFormat8BitYUV:
    st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
    break;
case bmdFormat10BitYUV:
    st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
    break;
case bmdFormat8BitARGB:
    st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
    break;
case bmdFormat8BitBGRA:
    st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
    break;
case bmdFormat10BitRGB:
    st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
    break;
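/* The constants are bits per pixel for each packing - 16 for 8-bit UYVY,
   64/3 for v210 (hence the extra factor of 3 in the denominator), 32 for
   8-bit ARGB/BGRA and 30 for 10-bit RGB - multiplied by the frame rate
   (time_base.den / time_base.num) to give the uncompressed bit rate. */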
switch (ctx->bmd_field_dominance) {
case bmdUpperFieldFirst:

case bmdLowerFieldFirst:

case bmdProgressiveFrame:
case bmdProgressiveSegmentedFrame:
if (ctx->enable_klv) {

    st->time_base.den = ctx->bmd_tb_den;
    st->time_base.num = ctx->bmd_tb_num;

if (ctx->teletext_lines) {

    st->time_base.den = ctx->bmd_tb_den;
    st->time_base.num = ctx->bmd_tb_num;

    ctx->teletext_st = st;
result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz,
                                    cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger,
                                    ctx->audio_st->codecpar->channels);

if (result != S_OK) {

result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
                                    ctx->raw_format,
                                    bmdVideoInputFlagDefault);

if (result != S_OK) {

if (ctx->dli->StartStreams() != S_OK) {

if (side_metadata) {