From: Steinar H. Gunderson <sgunderson@bigfoot.com>
Date: Mon, 11 May 2020 17:01:13 +0000 (+0200)
Subject: Allow SRT inputs to pass through without a rescale (only colorspace conversion).
X-Git-Tag: 2.0.0~38
X-Git-Url: https://git.sesse.net/?a=commitdiff_plain;h=446d357f647112a5e164d6526cdba9ed9ade9fc6;p=nageru

Allow SRT inputs to pass through without a rescale (only colorspace conversion).
---

diff --git a/nageru/ffmpeg_capture.cpp b/nageru/ffmpeg_capture.cpp
index 4bc277a..0c3aa57 100644
--- a/nageru/ffmpeg_capture.cpp
+++ b/nageru/ffmpeg_capture.cpp
@@ -264,8 +264,8 @@ FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned he
 #ifdef HAVE_SRT
 FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
 	: srt_sock(srt_sock),
-	  width(global_flags.width),
-	  height(global_flags.height),
+	  width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
+	  height(0),
 	  pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
 	  video_timebase{1, 1}
 {
@@ -327,12 +327,12 @@ std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
 	VideoMode mode;
 
 	char buf[256];
-	snprintf(buf, sizeof(buf), "%ux%u", width, height);
+	snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
 	mode.name = buf;
 
 	mode.autodetect = false;
-	mode.width = width;
-	mode.height = height;
+	mode.width = sws_last_width;
+	mode.height = sws_last_height;
 	mode.frame_rate_num = 60;
 	mode.frame_rate_den = 1;
 	mode.interlaced = false;
@@ -398,19 +398,21 @@ void FFmpegCapture::send_disconnected_frame()
 {
 	// Send an empty frame to signal that we have no signal anymore.
 	FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
+	size_t frame_width = width == 0 ? global_flags.width : width;
+	size_t frame_height = height == 0 ? global_flags.height : height;
 	if (video_frame.data) {
 		VideoFormat video_format;
-		video_format.width = width;
-		video_format.height = height;
+		video_format.width = frame_width;
+		video_format.height = frame_height;
 		video_format.frame_rate_nom = 60;
 		video_format.frame_rate_den = 1;
 		video_format.is_connected = false;
 		if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
-			video_format.stride = width * 4;
-			video_frame.len = width * height * 4;
+			video_format.stride = frame_width * 4;
+			video_frame.len = frame_width * frame_height * 4;
 			memset(video_frame.data, 0, video_frame.len);
 		} else {
-			video_format.stride = width;
+			video_format.stride = frame_width;
 			current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
 			current_frame_ycbcr_format.full_range = true;
 			current_frame_ycbcr_format.num_levels = 256;
@@ -420,9 +422,9 @@ void FFmpegCapture::send_disconnected_frame()
 			current_frame_ycbcr_format.cb_y_position = 0.0f;
 			current_frame_ycbcr_format.cr_x_position = 0.0f;
 			current_frame_ycbcr_format.cr_y_position = 0.0f;
-			video_frame.len = width * height * 2;
-			memset(video_frame.data, 0, width * height);
-			memset(video_frame.data + width * height, 128, width * height);  // Valid for both NV12 and planar.
+			video_frame.len = frame_width * frame_height * 2;
+			memset(video_frame.data, 0, frame_width * frame_height);
+			memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
 		}
 
 		frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
@@ -918,15 +920,15 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::
 VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
 {
 	VideoFormat video_format;
-	video_format.width = width;
-	video_format.height = height;
+	video_format.width = frame_width(frame);
+	video_format.height = frame_height(frame);
 	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
-		video_format.stride = width * 4;
+		video_format.stride = frame_width(frame) * 4;
 	} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
-		video_format.stride = width;
+		video_format.stride = frame_width(frame);
 	} else {
 		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
-		video_format.stride = width;
+		video_format.stride = frame_width(frame);
 	}
 	video_format.frame_rate_nom = video_timebase.den;
 	video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
@@ -956,7 +958,7 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
 		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
 		sws_ctx.reset(
 			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
-			               width, height, sws_dst_format,
+			               frame_width(frame), frame_height(frame), sws_dst_format,
 			               SWS_BICUBIC, nullptr, nullptr, nullptr));
 		sws_last_width = frame->width;
 		sws_last_height = frame->height;
@@ -972,16 +974,16 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
 	int linesizes[4] = { 0, 0, 0, 0 };
 	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
 		pic_data[0] = video_frame->data;
-		linesizes[0] = width * 4;
-		video_frame->len = (width * 4) * height;
+		linesizes[0] = frame_width(frame) * 4;
+		video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
 	} else if (pixel_format == PixelFormat_NV12) {
 		pic_data[0] = video_frame->data;
-		linesizes[0] = width;
+		linesizes[0] = frame_width(frame);
 
-		pic_data[1] = pic_data[0] + width * height;
-		linesizes[1] = width;
+		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
+		linesizes[1] = frame_width(frame);
 
-		video_frame->len = (width * 2) * height;
+		video_frame->len = (frame_width(frame) * 2) * frame_height(frame);
 
 		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
 		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
@@ -989,19 +991,19 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
 		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
 		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
-		int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
-		int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
+		int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
+		int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);
 
 		pic_data[0] = video_frame->data;
-		linesizes[0] = width;
+		linesizes[0] = frame_width(frame);
 
-		pic_data[1] = pic_data[0] + width * height;
+		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
 		linesizes[1] = chroma_width;
 
 		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
 		linesizes[2] = chroma_width;
 
-		video_frame->len = width * height + 2 * chroma_width * chroma_height;
+		video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;
 
 		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame,
 		                                                 is_mjpeg);
 	}
@@ -1020,6 +1022,24 @@ int FFmpegCapture::interrupt_cb()
 	return should_interrupt.load();
 }
 
+unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
+{
+	if (width == 0) {
+		return frame->width;
+	} else {
+		return width;
+	}
+}
+
+unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
+{
+	if (height == 0) {
+		return frame->height;
+	} else {
+		return height;
+	}
+}
+
 #ifdef HAVE_SRT
 int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
 {
diff --git a/nageru/ffmpeg_capture.h b/nageru/ffmpeg_capture.h
index 685191a..3567312 100644
--- a/nageru/ffmpeg_capture.h
+++ b/nageru/ffmpeg_capture.h
@@ -266,11 +266,14 @@ private:
 	int read_srt(uint8_t *buf, int buf_size);
 #endif
 
+	inline unsigned frame_width(const AVFrame *frame) const;
+	inline unsigned frame_height(const AVFrame *frame) const;
+
 	mutable std::mutex filename_mu;
 	std::string description, filename;
 	int srt_sock = -1;
 	uint16_t timecode = 0;
-	unsigned width, height;
+	unsigned width, height;  // 0 means keep input size.
 	bmusb::PixelFormat pixel_format = bmusb::PixelFormat_8BitBGRA;
 	movit::YCbCrFormat current_frame_ycbcr_format;
 	bool running = false;
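
As a usage note, the sketch below (not part of the patch) illustrates the sizing rule this change introduces: a configured dimension of 0 means "keep the input size", so libswscale is set up with identical source and destination dimensions and performs only the pixel-format/colorspace conversion, never a rescale. output_dim() and make_converter() are illustrative names, not Nageru or FFmpeg API; only sws_getContext() and the AVFrame fields are real.

    // Minimal sketch, assuming only FFmpeg's libavutil/libswscale.
    extern "C" {
    #include <libavutil/frame.h>
    #include <libswscale/swscale.h>
    }

    // A configured dimension of 0 means "pass through at the stream's native size".
    static unsigned output_dim(unsigned configured, int native)
    {
    	return configured == 0 ? unsigned(native) : configured;
    }

    static SwsContext *make_converter(const AVFrame *frame, unsigned cfg_width, unsigned cfg_height,
                                      AVPixelFormat dst_format)
    {
    	// With cfg_width == cfg_height == 0 (the new SRT default), the destination
    	// dimensions equal the source's, so swscale only converts the pixel format.
    	return sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
    	                      output_dim(cfg_width, frame->width), output_dim(cfg_height, frame->height),
    	                      dst_format, SWS_BICUBIC, nullptr, nullptr, nullptr);
    }

This mirrors what frame_width()/frame_height() do inside FFmpegCapture after this patch: the SRT constructor now passes width == height == 0, while the file-based constructor keeps taking an explicit size.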