return av_get_pix_fmt(best_format);
}
-YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg)
+YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
{
YCbCrFormat format;
AVColorSpace colorspace = frame->colorspace;
format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
break;
default:
- fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
- colorspace);
+ if (colorspace != *last_colorspace) {
+ fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
+ colorspace);
+ }
format.luma_coefficients = YCBCR_REC_709;
break;
}
+ *last_colorspace = colorspace;
format.full_range = is_full_range(desc);
format.num_levels = 1 << desc->comp[0].depth;
format.cb_y_position = 1.0;
break;
default:
- fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
- frame->chroma_location);
+ if (frame->chroma_location != *last_chroma_location) {
+ fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
+ frame->chroma_location);
+ }
format.cb_x_position = 0.5;
format.cb_y_position = 0.5;
break;
}
+ *last_chroma_location = frame->chroma_location;
if (is_mjpeg && !format.full_range) {
// Limited-range MJPEG is only detected by FFmpeg whenever a special
} // namespace
FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
- : filename(filename), width(width), height(height), video_timebase{1, 1}
+ : width(width), height(height), video_timebase{1, 1}
{
+ filenames.push_back(filename);
description = "Video: " + filename;
last_frame = steady_clock::now();
avformat_network_init(); // In case someone wants this.
}
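+// Variant that takes a whole list of files; they are played back-to-back,
+// each exactly once (play_once), instead of looping a single file.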
+FFmpegCapture::FFmpegCapture(const std::vector<std::string> &filenames, unsigned width, unsigned height)
+ : filenames(filenames), width(width), height(height), video_timebase{1, 1}
+{
+ description = "Video: " + filenames[0];
+
+ last_frame = steady_clock::now();
+ play_once = true;
+
+ avformat_network_init(); // In case someone wants this.
+}
+
#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
: srt_sock(srt_sock),
- width(global_flags.width),
- height(global_flags.height),
+ width(0), // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
+ height(0),
pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
video_timebase{1, 1}
{
dequeue_cleanup_callback();
}
swr_free(&resampler);
+#ifdef HAVE_SRT
+ if (srt_sock != -1) {
+ srt_close(srt_sock);
+ }
+#endif
}
void FFmpegCapture::configure_card()
VideoMode mode;
char buf[256];
- snprintf(buf, sizeof(buf), "%ux%u", width, height);
+ snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
mode.name = buf;
mode.autodetect = false;
- mode.width = width;
- mode.height = height;
+ mode.width = sws_last_width;
+ mode.height = sws_last_height;
mode.frame_rate_num = 60;
mode.frame_rate_den = 1;
mode.interlaced = false;
snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
pthread_setname_np(pthread_self(), thread_name);
+ printf("CAP\n");
while (!producer_thread_should_quit.should_quit()) {
string filename_copy;
+ printf("CAP %zu LEFT\n", filenames.size());
{
lock_guard<mutex> lock(filename_mu);
- filename_copy = filename;
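+ // Take the next file off the playlist.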
+ filename_copy = filenames.front();
+ filenames.erase(filenames.begin());
}
string pathname;
} else {
pathname = description;
}
+ printf("CAP %s\n", pathname.c_str());
if (pathname.empty()) {
send_disconnected_frame();
if (play_once) {
continue;
}
- if (play_once) {
+ if (play_once && filenames.empty()) {
send_disconnected_frame();
break;
}
{
// Send an empty frame to signal that we have no signal anymore.
FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
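+ // width == 0 / height == 0 means we follow the size of the video,
+ // but there is no video here, so fall back to the global output resolution.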
+ size_t frame_width = width == 0 ? global_flags.width : width;
+ size_t frame_height = height == 0 ? global_flags.height : height;
if (video_frame.data) {
VideoFormat video_format;
- video_format.width = width;
- video_format.height = height;
+ video_format.width = frame_width;
+ video_format.height = frame_height;
video_format.frame_rate_nom = 60;
video_format.frame_rate_den = 1;
video_format.is_connected = false;
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- video_format.stride = width * 4;
- video_frame.len = width * height * 4;
+ video_format.stride = frame_width * 4;
+ video_frame.len = frame_width * frame_height * 4;
memset(video_frame.data, 0, video_frame.len);
} else {
- video_format.stride = width;
+ video_format.stride = frame_width;
current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
current_frame_ycbcr_format.full_range = true;
current_frame_ycbcr_format.num_levels = 256;
current_frame_ycbcr_format.cb_y_position = 0.0f;
current_frame_ycbcr_format.cr_x_position = 0.0f;
current_frame_ycbcr_format.cr_y_position = 0.0f;
- video_frame.len = width * height * 2;
- memset(video_frame.data, 0, width * height);
- memset(video_frame.data + width * height, 128, width * height); // Valid for both NV12 and planar.
+ video_frame.len = frame_width * frame_height * 2;
+ memset(video_frame.data, 0, frame_width * frame_height);
+ memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height); // Valid for both NV12 and planar.
}
- frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
- video_frame, /*video_offset=*/0, video_format,
- FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+ if (frame_callback != nullptr) {
+ frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
+ video_frame, /*video_offset=*/0, video_format,
+ FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+ }
last_frame_was_connected = false;
}
}
}
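+// get_format callback for the video decoder: pick a VA-API hardware pixel format
+// if the codec offers one, and otherwise fall back to the best software format.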
+AVPixelFormat get_vaapi_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
+{
+ for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
+ for (int i = 0;; ++i) { // Termination condition inside loop.
+ const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
+ if (config == nullptr) { // End of list.
+ fprintf(stderr, "Decoder %s does not support device.\n", ctx->codec->name);
+ break;
+ }
+ if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
+ config->device_type == AV_HWDEVICE_TYPE_VAAPI &&
+ config->pix_fmt == *fmt_ptr) {
+ return config->pix_fmt;
+ }
+ }
+ }
+
+ // We found no VA-API formats, so take the best software format.
+ return fmt[0];
+}
+
bool FFmpegCapture::play_video(const string &pathname)
{
// Note: Call before open, not after; otherwise, there's a race.
} else {
last_modified = buf.st_mtim;
}
+ last_colorspace = static_cast<AVColorSpace>(-1);
+ last_chroma_location = static_cast<AVChromaLocation>(-1);
AVFormatContextWithCloser format_ctx;
if (srt_sock == -1) {
} else {
#ifdef HAVE_SRT
// SRT socket, already opened.
- AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
+ const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
mpegts_fmt, /*options=*/nullptr,
AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
// Open video decoder.
const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
- AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
+ const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
+
video_timebase = format_ctx->streams[video_stream_index]->time_base;
AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
return false;
}
+
+ // Seemingly, it's not too easy to make something that just initializes
+ // “whatever goes”, so we don't get VDPAU or CUDA here without enumerating
+ // through several different types. VA-API will do for now.
+ AVBufferRef *hw_device_ctx = nullptr;
+ if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) < 0) {
+ fprintf(stderr, "Failed to initialize VA-API for FFmpeg acceleration. Decoding video in software.\n");
+ } else {
+ video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+ video_codec_ctx->get_format = get_vaapi_hw_format;
+ }
+
if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
return false;
fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
return false;
}
- AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
+ const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
if (audio_codec == nullptr) {
fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
return false;
// so don't try).
return true;
}
if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
return true;
}
}
VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
+ if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
+ // Invalid frame rate; try constructing it from the previous frame length.
+ // (This is especially important if we are the master card, for SRT,
+ // since it affects audio. Not all senders have good timebases
+ // (e.g., Larix rounds first to timebase 1000 and then multiplies by
+ // 90 from there, it seems), but it's much better to have an oscillating
+ // value than just locking at 60.)
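+ // For example, a 1/90000 timebase with a pts step of 1500 between frames
+ // gives 90000 / (1 * 1500) = 60 fps.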
+ if (last_pts != 0 && frame->pts > last_pts) {
+ int64_t pts_diff = frame->pts - last_pts;
+ video_format.frame_rate_nom = video_timebase.den;
+ video_format.frame_rate_den = video_timebase.num * pts_diff;
+ } else {
+ video_format.frame_rate_nom = 60;
+ video_format.frame_rate_den = 1;
+ }
+ }
UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
if (error) {
return false;
timecode += MAX_FPS * 2 + 1;
}
last_neutral_color = get_neutral_color(frame->metadata);
- frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
- video_frame.get_and_release(), 0, video_format,
- audio_frame.get_and_release(), 0, audio_format);
+ if (frame_callback != nullptr) {
+ frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
+ video_frame.get_and_release(), 0, video_format,
+ audio_frame.get_and_release(), 0, audio_format);
+ }
first_frame = false;
last_frame = steady_clock::now();
last_frame_was_connected = true;
if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
}
- if (pkt.stream_index == video_stream_index) {
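+ // Pass the compressed video packet on to any raw-packet consumer
+ // (mirroring audio_callback above).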
+ if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
+ video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
+ }
+ if (pkt.stream_index == video_stream_index && global_flags.transcode_video) {
if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
*error = true;
return AVFrameWithDeleter(nullptr);
}
- } else if (pkt.stream_index == audio_stream_index) {
+ } else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) {
has_audio = true;
if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
// Decode video, if we have a frame.
int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
if (err == 0) {
+ if (video_avframe->format == AV_PIX_FMT_VAAPI) {
+ // Get the frame down to the CPU. (TODO: See if we can keep it
+ // on the GPU all the way, since it will be going up again later.
+ // However, this only works if the OpenGL GPU is the same one.)
+ AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
+ int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
+ if (err != 0) {
+ fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
+ *error = true;
+ return AVFrameWithDeleter(nullptr);
+ }
+ sw_frame->pts = video_avframe->pts;
+ sw_frame->pkt_duration = video_avframe->pkt_duration;
+ video_avframe = move(sw_frame);
+ }
frame_finished = true;
break;
} else if (err != AVERROR(EAGAIN)) {
VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
VideoFormat video_format;
- video_format.width = width;
- video_format.height = height;
+ video_format.width = frame_width(frame);
+ video_format.height = frame_height(frame);
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- video_format.stride = width * 4;
+ video_format.stride = frame_width(frame) * 4;
} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
- video_format.stride = width;
+ video_format.stride = frame_width(frame);
} else {
assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
- video_format.stride = width;
+ video_format.stride = frame_width(frame);
}
video_format.frame_rate_nom = video_timebase.den;
video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
- if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
- // Invalid frame rate.
- video_format.frame_rate_nom = 60;
- video_format.frame_rate_den = 1;
- }
video_format.has_signal = true;
video_format.is_connected = true;
return video_format;
sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
sws_ctx.reset(
sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
- width, height, sws_dst_format,
+ frame_width(frame), frame_height(frame), sws_dst_format,
SWS_BICUBIC, nullptr, nullptr, nullptr));
sws_last_width = frame->width;
sws_last_height = frame->height;
int linesizes[4] = { 0, 0, 0, 0 };
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
pic_data[0] = video_frame->data;
- linesizes[0] = width * 4;
- video_frame->len = (width * 4) * height;
+ linesizes[0] = frame_width(frame) * 4;
+ video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
} else if (pixel_format == PixelFormat_NV12) {
pic_data[0] = video_frame->data;
- linesizes[0] = width;
+ linesizes[0] = frame_width(frame);
- pic_data[1] = pic_data[0] + width * height;
- linesizes[1] = width;
+ pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
+ linesizes[1] = frame_width(frame);
- video_frame->len = (width * 2) * height;
+ video_frame->len = (frame_width(frame) * 2) * frame_height(frame);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
- current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
+ current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
} else {
assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
- int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
- int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
+ int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
+ int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);
pic_data[0] = video_frame->data;
- linesizes[0] = width;
+ linesizes[0] = frame_width(frame);
- pic_data[1] = pic_data[0] + width * height;
+ pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
linesizes[1] = chroma_width;
pic_data[2] = pic_data[1] + chroma_width * chroma_height;
linesizes[2] = chroma_width;
- video_frame->len = width * height + 2 * chroma_width * chroma_height;
+ video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;
- current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
+ current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
}
sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
return should_interrupt.load();
}
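+// width == 0 / height == 0 in the constructor means "follow the size of the
+// incoming stream"; in that case, use the decoded frame's own dimensions.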
+unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
+{
+ if (width == 0) {
+ return frame->width;
+ } else {
+ return width;
+ }
+}
+
+unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
+{
+ if (height == 0) {
+ return frame->height;
+ } else {
+ return height;
+ }
+}
+
#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{