Basically what was happening is that when the master card lost
or corrupted a frame, we didn't set a timestamp on it, leaving it
at steady_clock::time_point::min(). This would in turn cause us
to compute a latency of trillions of seconds, throwing off the
filter and essentially pinning it at 0.95 forever.
The fix is twofold: we now always set timestamps, but we also make
ourselves robust to ones that are way off (negative uptime).
video_format.width = width;
video_format.height = height;
-
- current_video_frame.received_timestamp = now;
}
}
audio_format.bits_per_sample = 32;
audio_format.num_channels = 2;
-
- current_audio_frame.received_timestamp = now;
}
}
+ current_video_frame.received_timestamp = now;
+ current_audio_frame.received_timestamp = now;
+
if (current_video_frame.data != nullptr || current_audio_frame.data != nullptr) {
// TODO: Put into a queue and put into a dequeue thread, if the
// BlackMagic drivers don't already do that for us?
new_frame.length = frame_length;
new_frame.interlaced = false;
new_frame.dropped_frames = dropped_frames;
+ new_frame.received_timestamp = video_frame.received_timestamp;
card->new_frames.push_back(move(new_frame));
card->new_frames_changed.notify_all();
}
return true;
}
+ // This can happen when we get dropped frames on the master card.
+ if (duration<double>(ts.time_since_epoch()).count() <= 0.0) {
+ rate_adjustment_policy = DO_NOT_ADJUST_RATE;
+ }
+
if (rate_adjustment_policy == ADJUST_RATE && (a0.good_sample || a1.good_sample)) {
// Estimate the current number of input samples produced at
// this instant in time, by extrapolating from the last known