#include <srt/srt.h>
#endif
-#define FRAME_SIZE (8 << 20) // 8 MB.
-
using namespace std;
using namespace std::chrono;
using namespace bmusb;
if (desc->comp[0].depth != 8) continue;
// Same or better chroma resolution only.
- int chroma_w_diff = desc->log2_chroma_w - src_desc->log2_chroma_w;
- int chroma_h_diff = desc->log2_chroma_h - src_desc->log2_chroma_h;
+ int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
+ int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
if (chroma_w_diff < 0 || chroma_h_diff < 0)
continue;
fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
}
- // We found no VA-API formats, so take the best software format.
+ // We found no VA-API formats, so take the first software format.
+ for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
+ if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
+ fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
+ return *fmt_ptr;
+ }
+ }
+
+ // Fallback: Just return anything. (Should never really happen.)
return fmt[0];
}
AVFormatContextWithCloser format_ctx;
if (srt_sock == -1) {
- // Regular file.
+ // Regular file (or stream).
+ frame_timeout_started = steady_clock::now();
+ frame_timeout_valid = true;
format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
/*options=*/nullptr,
AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
+ frame_timeout_valid = false;
} else {
#ifdef HAVE_SRT
// SRT socket, already opened.
int64_t audio_pts;
bool error;
+ frame_timeout_started = steady_clock::now();
+ frame_timeout_valid = true;
AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
+ frame_timeout_valid = false;
+ if (should_interrupt.load()) {
+ // Abort no matter whether we got a frame or not.
+ return false;
+ }
if (error) {
if (++consecutive_errors >= 100) {
- fprintf(stderr, "More than 100 consecutive video frames, aborting playback.\n");
+ fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
return false;
} else {
continue;
current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
}
+
+ // FIXME: Currently, if the video is too high-res for one of the allocated
+ // frames, we simply refuse to scale it here to avoid crashes. It would be better
+ // if we could somehow signal getting larger frames, especially as 4K is a thing now.
+ if (video_frame->len > FRAME_SIZE) {
+ fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+ *error = true;
+ return video_frame;
+ }
+
sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
return video_frame;
int FFmpegCapture::interrupt_cb()
{
+ // If ten seconds have gone by without anything happening, we assume that
+ // we are in a network stream that died and FFmpeg just didn't
+ // pick it up (or perhaps it just hung, keeping the connection open).
+ // Called back approximately every 100 ms if something is hanging,
+ // so we get more than enough accuracy for our purposes.
+ if (!should_interrupt && frame_timeout_valid &&
+ duration<double>(steady_clock::now() - frame_timeout_started).count() >= 10.0) {
+ // Copy the filename under the lock; another thread may be
+ // mutating it, and std::string is not safe to read concurrently.
+ string filename_copy;
+ {
+ lock_guard<mutex> lock(filename_mu);
+ filename_copy = filename;
+ }
+ // Print the _copy_ -- reading filename here without the lock
+ // would be the very data race the copy exists to prevent.
+ fprintf(stderr, "%s: No frame for more than 10 seconds, restarting stream.\n", filename_copy.c_str());
+ should_interrupt = true;
+ }
return should_interrupt.load();
}