#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
: srt_sock(srt_sock),
- width(global_flags.width),
- height(global_flags.height),
+ width(0), // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
+ height(0),
pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
video_timebase{1, 1}
{
VideoMode mode;
char buf[256];
- snprintf(buf, sizeof(buf), "%ux%u", width, height);
+ snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
mode.name = buf;
mode.autodetect = false;
- mode.width = width;
- mode.height = height;
+ mode.width = sws_last_width;
+ mode.height = sws_last_height;
mode.frame_rate_num = 60;
mode.frame_rate_den = 1;
mode.interlaced = false;
{
// Send an empty frame to signal that we have no signal anymore.
FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
+ size_t frame_width = width == 0 ? global_flags.width : width;
+ size_t frame_height = height == 0 ? global_flags.height : height;
if (video_frame.data) {
VideoFormat video_format;
- video_format.width = width;
- video_format.height = height;
+ video_format.width = frame_width;
+ video_format.height = frame_height;
video_format.frame_rate_nom = 60;
video_format.frame_rate_den = 1;
video_format.is_connected = false;
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- video_format.stride = width * 4;
- video_frame.len = width * height * 4;
+ video_format.stride = frame_width * 4;
+ video_frame.len = frame_width * frame_height * 4;
memset(video_frame.data, 0, video_frame.len);
} else {
- video_format.stride = width;
+ video_format.stride = frame_width;
current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
current_frame_ycbcr_format.full_range = true;
current_frame_ycbcr_format.num_levels = 256;
current_frame_ycbcr_format.cb_y_position = 0.0f;
current_frame_ycbcr_format.cr_x_position = 0.0f;
current_frame_ycbcr_format.cr_y_position = 0.0f;
- video_frame.len = width * height * 2;
- memset(video_frame.data, 0, width * height);
- memset(video_frame.data + width * height, 128, width * height); // Valid for both NV12 and planar.
+ video_frame.len = frame_width * frame_height * 2;
+ memset(video_frame.data, 0, frame_width * frame_height);
+ memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height); // Valid for both NV12 and planar.
}
frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
VideoFormat video_format;
- video_format.width = width;
- video_format.height = height;
+ video_format.width = frame_width(frame);
+ video_format.height = frame_height(frame);
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- video_format.stride = width * 4;
+ video_format.stride = frame_width(frame) * 4;
} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
- video_format.stride = width;
+ video_format.stride = frame_width(frame);
} else {
assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
- video_format.stride = width;
+ video_format.stride = frame_width(frame);
}
video_format.frame_rate_nom = video_timebase.den;
video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
sws_ctx.reset(
sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
- width, height, sws_dst_format,
+ frame_width(frame), frame_height(frame), sws_dst_format,
SWS_BICUBIC, nullptr, nullptr, nullptr));
sws_last_width = frame->width;
sws_last_height = frame->height;
int linesizes[4] = { 0, 0, 0, 0 };
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
pic_data[0] = video_frame->data;
- linesizes[0] = width * 4;
- video_frame->len = (width * 4) * height;
+ linesizes[0] = frame_width(frame) * 4;
+ video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
} else if (pixel_format == PixelFormat_NV12) {
pic_data[0] = video_frame->data;
- linesizes[0] = width;
+ linesizes[0] = frame_width(frame);
- pic_data[1] = pic_data[0] + width * height;
- linesizes[1] = width;
+ pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
+ linesizes[1] = frame_width(frame);
- video_frame->len = (width * 2) * height;
+ video_frame->len = (frame_width(frame) * 2) * frame_height(frame);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
- int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
- int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
+ int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
+ int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);
pic_data[0] = video_frame->data;
- linesizes[0] = width;
+ linesizes[0] = frame_width(frame);
- pic_data[1] = pic_data[0] + width * height;
+ pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
linesizes[1] = chroma_width;
pic_data[2] = pic_data[1] + chroma_width * chroma_height;
linesizes[2] = chroma_width;
- video_frame->len = width * height + 2 * chroma_width * chroma_height;
+ video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;
current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
}
return should_interrupt.load();
}
+// Effective output width for this frame. A configured width of 0 means
+// “don't rescale” (see the SRT constructor), so in that case we follow
+// whatever width the decoded frame itself carries.
+unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
+{
+	return (width == 0) ? frame->width : width;
+}
+
+// Effective output height for this frame, mirroring frame_width():
+// a configured height of 0 means “don't rescale”, so follow the decoded
+// frame's own height; otherwise use the fixed configured height.
+unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
+{
+	if (height == 0) {
+		return frame->height;
+	} else {
+		return height;  // Bug fix: previously returned width here by mistake.
+	}
+}
+
#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{