*/
#include <atomic>
+#include <vector>
using std::atomic;
/* Include internal.h first to avoid conflict between winsock.h (used by
 * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
extern "C" {
#include "config.h"
+#include "libavcodec/packet_internal.h"
#include "libavformat/avformat.h"
#include "libavutil/avassert.h"
#include "libavutil/avutil.h"
#include "libavutil/common.h"
+#include "libavutil/internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/time.h"
+#include "libavutil/timecode.h"
#include "libavutil/mathematics.h"
#include "libavutil/reverse.h"
#include "avdevice.h"
static void avpacket_queue_flush(AVPacketQueue *q)
{
- AVPacketList *pkt, *pkt1;
+ PacketList *pkt, *pkt1;
pthread_mutex_lock(&q->mutex);
for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
{
- AVPacketList *pkt1;
+ PacketList *pkt1;
// Drop Packet if queue size is > maximum queue size
if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
return -1;
}
- pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
+ pkt1 = (PacketList *)av_malloc(sizeof(PacketList));
if (!pkt1) {
av_packet_unref(pkt);
return -1;
static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
{
- AVPacketList *pkt1;
+ PacketList *pkt1;
int ret;
pthread_mutex_lock(&q->mutex);
return ret;
}
+/* Collect KLV (SMPTE ST 336) payloads carried in VANC ancillary packets
+ * (DID 0x44 / SDID 0x04) on the given video frame, reassemble multi-part
+ * payloads per MID in ascending PSC order, and queue the joined result as
+ * one packet on ctx->klv_st stamped with the video frame's pts.
+ * An out-of-order PSC discards any partial payload collected for that MID. */
+static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
+{
+ const uint8_t KLV_DID = 0x44;
+ const uint8_t KLV_IN_VANC_SDID = 0x04;
+
+ // One fragment of a multi-part KLV payload, keyed by its sequence counter.
+ struct KLVPacket
+ {
+ uint16_t sequence_counter;
+ std::vector<uint8_t> data;
+ };
+
+ size_t total_size = 0;
+ // One reassembly list per possible MID value (0..255).
+ std::vector<std::vector<KLVPacket>> klv_packets(256);
+
+ // The frame may not expose ancillary packets at all; bail out silently.
+ IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
+ if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
+ return;
+
+ IDeckLinkAncillaryPacketIterator *it = nullptr;
+ if (packets->GetPacketIterator(&it) != S_OK) {
+ packets->Release();
+ return;
+ }
+
+ IDeckLinkAncillaryPacket *packet = nullptr;
+ while (it->Next(&packet) == S_OK) {
+ uint8_t *data = nullptr;
+ uint32_t size = 0;
+
+ if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
+ av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());
+
+ if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
+ // First 3 bytes are the MID (1 byte) and PSC (2 bytes, big-endian).
+ if (size > 3) {
+ uint8_t mid = data[0];
+ uint16_t psc = data[1] << 8 | data[2];
+
+ av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);
+
+ // PSC is 1-based, so the next expected value is list length + 1.
+ auto& list = klv_packets[mid];
+ uint16_t expected_psc = list.size() + 1;
+
+ if (psc == expected_psc) {
+ uint32_t data_len = size - 3;
+ total_size += data_len;
+
+ KLVPacket packet{ psc };
+ packet.data.resize(data_len);
+ memcpy(packet.data.data(), data + 3, data_len);
+
+ list.push_back(std::move(packet));
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);
+
+ // Drop the partial payload: subtract its bytes from the
+ // running total before clearing the list.
+ if (!list.empty()) {
+ for (auto& klv : list)
+ total_size -= klv.data.size();
+
+ list.clear();
+ }
+ }
+ }
+ }
+ }
+
+ packet->Release();
+ }
+
+ it->Release();
+ packets->Release();
+
+ if (total_size > 0) {
+ // Concatenate all fragments, walking MIDs in ascending order.
+ std::vector<uint8_t> klv;
+ klv.reserve(total_size);
+
+ for (size_t i = 0; i < klv_packets.size(); ++i) {
+ auto& list = klv_packets[i];
+
+ if (list.empty())
+ continue;
+
+ av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);
+
+ for (auto& packet : list)
+ klv.insert(klv.end(), packet.data.begin(), packet.data.end());
+ }
+
+ AVPacket klv_packet = { 0 };
+ klv_packet.pts = pts;
+ klv_packet.dts = pts;
+ klv_packet.flags |= AV_PKT_FLAG_KEY;
+ klv_packet.stream_index = ctx->klv_st->index;
+ // NOTE(review): data points into the local `klv` vector; presumably
+ // avpacket_queue_put() copies/refcounts the payload before returning —
+ // confirm against the queue implementation.
+ klv_packet.data = klv.data();
+ klv_packet.size = klv.size();
+
+ if (avpacket_queue_put(&ctx->queue, &klv_packet) < 0) {
+ ++ctx->dropped;
+ }
+ }
+}
+
class decklink_input_callback : public IDeckLinkInputCallback
{
public:
return pts;
}
+/* Query videoFrame for a timecode of the given BMD format and convert it to
+ * an AVTimecode.  Returns 0 (or >=0) on success, AVERROR(ENOENT) when the
+ * frame carries no timecode of this format, or a negative error from
+ * av_timecode_init_from_components(). */
+static int get_bmd_timecode(AVFormatContext *avctx, AVTimecode *tc, AVRational frame_rate, BMDTimecodeFormat tc_format, IDeckLinkVideoInputFrame *videoFrame)
+{
+ IDeckLinkTimecode *timecode;
+ int ret = AVERROR(ENOENT);
+#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
+ int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);
+#else
+ // Older SDKs have no high-frame-rate timecode format.
+ int hfr = 0;
+#endif
+ if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {
+ uint8_t hh, mm, ss, ff;
+ if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {
+ int flags = (timecode->GetFlags() & bmdTimecodeIsDropFrame) ? AV_TIMECODE_FLAG_DROPFRAME : 0;
+ // Above 30 fps a non-HFR timecode counts frame pairs; fold the
+ // field-mark flag back in as the low bit of the frame number.
+ if (!hfr && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1)
+ ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);
+ ret = av_timecode_init_from_components(tc, frame_rate, flags, hh, mm, ss, ff, avctx);
+ }
+ timecode->Release();
+ }
+ return ret;
+}
+
+/* Extract the frame's timecode into *tc, honoring ctx->tc_format.  For
+ * RP188Any at >30 fps the candidate formats are tried manually in a fixed
+ * order (HFR, VITC1, VITC2, LTC) — see the comment below for why.  Returns
+ * the result of get_bmd_timecode() for the first format that is present. */
+static int get_frame_timecode(AVFormatContext *avctx, decklink_ctx *ctx, AVTimecode *tc, IDeckLinkVideoInputFrame *videoFrame)
+{
+ AVRational frame_rate = ctx->video_st->r_frame_rate;
+ int ret;
+ /* 50/60 fps content has alternating VITC1 and VITC2 timecode (see SMPTE ST
+ * 12-2, section 7), so the native ordering of RP188Any (HFR, VITC1, LTC,
+ * VITC2) would not work because LTC might not contain the field flag.
+ * Therefore we query the types manually. */
+ if (ctx->tc_format == bmdTimecodeRP188Any && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1) {
+#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
+ ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
+ if (ret == AVERROR(ENOENT))
+#endif
+ ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC1, videoFrame);
+ if (ret == AVERROR(ENOENT))
+ ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC2, videoFrame);
+ if (ret == AVERROR(ENOENT))
+ ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188LTC, videoFrame);
+ } else {
+ ret = get_bmd_timecode(avctx, tc, frame_rate, ctx->tc_format, videoFrame);
+ }
+ return ret;
+}
+
HRESULT decklink_input_callback::VideoInputFrameArrived(
IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{
// Handle Video Frame
if (videoFrame) {
- AVPacket pkt;
- av_init_packet(&pkt);
+ AVPacket pkt = { 0 };
if (ctx->frameCount % 25 == 0) {
unsigned long long qsize = avpacket_queue_size(&ctx->queue);
av_log(avctx, AV_LOG_DEBUG,
// Handle Timecode (if requested)
if (ctx->tc_format) {
- IDeckLinkTimecode *timecode;
- if (videoFrame->GetTimecode(ctx->tc_format, &timecode) == S_OK) {
- const char *tc = NULL;
- DECKLINK_STR decklink_tc;
- if (timecode->GetString(&decklink_tc) == S_OK) {
- tc = DECKLINK_STRDUP(decklink_tc);
- DECKLINK_FREE(decklink_tc);
- }
- timecode->Release();
+ AVTimecode tcr;
+ if (get_frame_timecode(avctx, ctx, &tcr, videoFrame) >= 0) {
+ char tcstr[AV_TIMECODE_STR_SIZE];
+ const char *tc = av_timecode_make_string(&tcr, tcstr, 0);
if (tc) {
AVDictionary* metadata_dict = NULL;
- int metadata_len;
uint8_t* packed_metadata;
- if (av_dict_set(&metadata_dict, "timecode", tc, AV_DICT_DONT_STRDUP_VAL) >= 0) {
+
+ if (av_cmp_q(ctx->video_st->r_frame_rate, av_make_q(60, 1)) < 1) {
+ uint32_t tc_data = av_timecode_get_smpte_from_framenum(&tcr, 0);
+ int size = sizeof(uint32_t) * 4;
+ uint32_t *sd = (uint32_t *)av_packet_new_side_data(&pkt, AV_PKT_DATA_S12M_TIMECODE, size);
+
+ if (sd) {
+ *sd = 1; // one TC
+ *(sd + 1) = tc_data; // TC
+ }
+ }
+
+ if (av_dict_set(&metadata_dict, "timecode", tc, 0) >= 0) {
+ size_t metadata_len;
packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
av_dict_free(&metadata_dict);
if (packed_metadata) {
if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
av_freep(&packed_metadata);
+ else if (!ctx->tc_seen)
+ ctx->tc_seen = ctx->frameCount;
}
}
}
}
}
+ if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
+
+ av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
+ av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
+ "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
+ return S_OK;
+ }
+
pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
pkt.dts = pkt.pts;
if (!no_video) {
IDeckLinkVideoFrameAncillary *vanc;
- AVPacket txt_pkt;
+ AVPacket txt_pkt = { 0 };
uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
uint8_t *txt_buf = txt_buf0;
+ if (ctx->enable_klv) {
+ handle_klv(avctx, ctx, videoFrame, pkt.pts);
+ }
+
if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
int i;
- int64_t line_mask = 1;
BMDPixelFormat vanc_format = vanc->GetPixelFormat();
txt_buf[0] = 0x10; // data_identifier - EBU_data
txt_buf++;
#if CONFIG_LIBZVBI
if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
(vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
+ int64_t line_mask = 1;
av_assert0(videoFrame->GetWidth() == 720);
for (i = 6; i < 336; i++, line_mask <<= 1) {
uint8_t *buf;
txt_buf[1] = 0x2c; // data_unit_length
txt_buf += 46;
}
- av_init_packet(&txt_pkt);
txt_pkt.pts = pkt.pts;
txt_pkt.dts = pkt.dts;
txt_pkt.stream_index = ctx->teletext_st->index;
// Handle Audio Frame
if (audioFrame) {
- AVPacket pkt;
+ AVPacket pkt = { 0 };
BMDTimeValue audio_pts;
- av_init_packet(&pkt);
//hack among hacks
pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
+// Autodetection callback: record the newly detected display mode and, if no
+// raw_format was requested by the user, derive the pixel format from the
+// RGB444 detection flag reported alongside this mode change.
HRESULT decklink_input_callback::VideoInputFormatChanged(
BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
- BMDDetectedVideoInputFormatFlags)
+ BMDDetectedVideoInputFormatFlags formatFlags)
{
+ struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
ctx->bmd_mode = mode->GetDisplayMode();
+ // check the C context member to make sure we set both raw_format and bmd_mode with data from the same format change callback
+ if (!cctx->raw_format)
+ ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
return S_OK;
}
return -1;
}
- // 1 second timeout
- for (i = 0; i < 10; i++) {
+ // 3 second timeout
+ for (i = 0; i < 30; i++) {
av_usleep(100000);
/* Sometimes VideoInputFrameArrived is called without the
* bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
- if (ctx->capture_started) {
+ if (ctx->dli) {
ctx->dli->StopStreams();
ctx->dli->DisableVideoInput();
ctx->dli->DisableAudioInput();
class decklink_input_callback *input_callback;
AVStream *st;
HRESULT result;
- char fname[1024];
- char *tmp;
- int mode_num = 0;
int ret;
ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
return AVERROR(ENOMEM);
ctx->list_devices = cctx->list_devices;
ctx->list_formats = cctx->list_formats;
+ ctx->enable_klv = cctx->enable_klv;
ctx->teletext_lines = cctx->teletext_lines;
ctx->preroll = cctx->preroll;
ctx->duplex_mode = cctx->duplex_mode;
ctx->video_pts_source = cctx->video_pts_source;
ctx->draw_bars = cctx->draw_bars;
ctx->audio_depth = cctx->audio_depth;
+ if (cctx->raw_format > 0 && (unsigned int)cctx->raw_format < FF_ARRAY_ELEMS(decklink_raw_format_map))
+ ctx->raw_format = decklink_raw_format_map[cctx->raw_format];
cctx->ctx = ctx;
/* Check audio channel option for valid values: 2, 8 or 16 */
return AVERROR_EXIT;
}
- if (cctx->v210) {
- av_log(avctx, AV_LOG_WARNING, "The bm_v210 option is deprecated and will be removed. Please use the -raw_format yuv422p10.\n");
- cctx->raw_format = MKBETAG('v','2','1','0');
- }
-
- av_strlcpy(fname, avctx->url, sizeof(fname));
- tmp=strchr (fname, '@');
- if (tmp != NULL) {
- av_log(avctx, AV_LOG_WARNING, "The @mode syntax is deprecated and will be removed. Please use the -format_code option.\n");
- mode_num = atoi (tmp+1);
- *tmp = 0;
- }
-
- ret = ff_decklink_init_device(avctx, fname);
+ ret = ff_decklink_init_device(avctx, avctx->url);
if (ret < 0)
return ret;
goto error;
}
- if (mode_num == 0 && !cctx->format_code) {
+ if (!cctx->format_code) {
if (decklink_autodetect(cctx) < 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
ret = AVERROR(EIO);
}
av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
}
- if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) {
- av_log(avctx, AV_LOG_ERROR, "Could not set mode number %d or format code %s for %s\n",
- mode_num, (cctx->format_code) ? cctx->format_code : "(unset)", fname);
+ if (ctx->raw_format == (BMDPixelFormat)0)
+ ctx->raw_format = bmdFormat8BitYUV;
+ if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
+ cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
ret = AVERROR(EIO);
goto error;
}
st->time_base.num = ctx->bmd_tb_num;
st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
- switch((BMDPixelFormat)cctx->raw_format) {
+ switch(ctx->raw_format) {
case bmdFormat8BitYUV:
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
- st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
st->codecpar->format = AV_PIX_FMT_UYVY422;
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
break;
case bmdFormat10BitYUV:
st->codecpar->codec_id = AV_CODEC_ID_V210;
- st->codecpar->codec_tag = MKTAG('V','2','1','0');
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
- st->codecpar->bits_per_coded_sample = 10;
break;
case bmdFormat8BitARGB:
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->format = AV_PIX_FMT_0RGB;
- st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
break;
case bmdFormat8BitBGRA:
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->format = AV_PIX_FMT_BGR0;
- st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format);
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
break;
case bmdFormat10BitRGB:
st->codecpar->codec_id = AV_CODEC_ID_R210;
- st->codecpar->codec_tag = MKTAG('R','2','1','0');
- st->codecpar->format = AV_PIX_FMT_RGB48LE;
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
- st->codecpar->bits_per_coded_sample = 10;
break;
default:
- av_log(avctx, AV_LOG_ERROR, "Raw Format %.4s not supported\n", (char*) &cctx->raw_format);
+ char fourcc_str[AV_FOURCC_MAX_STRING_SIZE] = {0};
+ av_fourcc_make_string(fourcc_str, ctx->raw_format);
+ av_log(avctx, AV_LOG_ERROR, "Raw Format %s not supported\n", fourcc_str);
ret = AVERROR(EINVAL);
goto error;
}
ctx->video_st=st;
+ if (ctx->enable_klv) {
+ st = avformat_new_stream(avctx, NULL);
+ if (!st) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
+ st->time_base.den = ctx->bmd_tb_den;
+ st->time_base.num = ctx->bmd_tb_num;
+ st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
+ avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ ctx->klv_st = st;
+ }
+
if (ctx->teletext_lines) {
st = avformat_new_stream(avctx, NULL);
if (!st) {
}
result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
- (BMDPixelFormat) cctx->raw_format,
+ ctx->raw_format,
bmdVideoInputFlagDefault);
if (result != S_OK) {
avpacket_queue_get(&ctx->queue, pkt, 1);
if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
- int size;
+ size_t size;
const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
if (side_metadata) {
if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)