#include "ffmpeg_capture.h"
+#include "defs.h"
+#include "shared/shared_defs.h"
#include <assert.h>
+#include <errno.h>
+#include <epoxy/egl.h>
+#include <limits>
+#include <map>
+#include <memory>
+#include <movit/effect.h>
+#include <movit/image_format.h>
+#include <movit/ycbcr.h>
+#include <mutex>
#include <pthread.h>
+#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <string>
#include <sys/stat.h>
-#include <unistd.h>
+#include <thread>
+#include <QSurface>
extern "C" {
#include <libavcodec/avcodec.h>
+#include <libavcodec/codec.h>
+#include <libavcodec/codec_id.h>
+#include <libavcodec/codec_par.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
+#include <libavutil/buffer.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/common.h>
+#include <libavutil/dict.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
-#include <libavutil/imgutils.h>
-#include <libavutil/mem.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>
-#include <libavutil/opt.h>
+#include <libavutil/rational.h>
+#include <libavutil/samplefmt.h>
+#include <libavutil/version.h>
+#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}
#include <movit/colorspace_conversion_effect.h>
#include "bmusb/bmusb.h"
+#include "shared/context.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
-#include "image_input.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"
} // namespace
-FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
-	: filename(filename), width(width), height(height), video_timebase{1, 1}
+// File-based capture. |surface| is used by the producer thread to create an
+// OpenGL context (needed in case create_frame() has to reallocate); the
+// destructor deletes it, so ownership is transferred to this object.
+FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height, QSurface *surface)
+	: filename(filename), width(width), height(height), video_timebase{1, 1}, surface(surface)
{
	description = "Video: " + filename;
}
#ifdef HAVE_SRT
-FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
+// SRT capture: like the file-based constructor, but reads from an already
+// established SRT socket. |surface| serves the same GL-context purpose and is
+// likewise owned (deleted) by this object.
+FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id, QSurface *surface)
	: srt_sock(srt_sock),
	  width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
	  height(0),
	  pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
-	video_timebase{1, 1}
+	video_timebase{1, 1},
+	surface(surface)
{
	if (stream_id.empty()) {
		description = "SRT stream";
		srt_close(srt_sock);
	}
#endif
+	// The surface handed to the constructor is owned by us; free it here.
	delete surface;
}
void FFmpegCapture::configure_card()
	snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
	pthread_setname_np(pthread_self(), thread_name);
+	// We need a context in case create_frame() needs to reallocate something.
+	// (If none is given, we are probably in Kaeru, which uses MallocFrameAllocator
+	// anyway, which doesn't reallocate currently and definitely doesn't need
+	// an active OpenGL context to do so.)
+	QOpenGLContext *context = nullptr;
+	if (surface != nullptr) {
+		context = create_context(this->surface);
+		eglBindAPI(EGL_OPENGL_API);
+		if (!make_current(context, this->surface)) {
+			// Dump as much EGL state as we can before giving up;
+			// without a current context, decoding cannot proceed safely.
+			printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
+				eglGetError());
+			abort();
+		}
+	}
+
	while (!producer_thread_should_quit.should_quit()) {
		string filename_copy;
		{
	}
	if (has_dequeue_callbacks) {
-		dequeue_cleanup_callback();
+		dequeue_cleanup_callback();
		has_dequeue_callbacks = false;
-	}
+	}
+
+	// Tear down the per-thread GL context created above. (If surface was
+	// nullptr, context is nullptr too; presumably delete_context() handles
+	// that as a no-op — TODO confirm against shared/context.h.)
+	delete_context(context);
}
void FFmpegCapture::send_disconnected_frame()
		fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
		return false;
	}
-	unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
-		video_codec_ctx.get(), avcodec_close);
+	// NOTE(review): avcodec_close() was deprecated and later removed in newer
+	// FFmpeg; the codec context is presumably now freed by the RAII wrapper
+	// from shared/ffmpeg_raii.h instead — TODO confirm no leak on this path.
	// Used in decode_ycbcr_format().
	is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;
		return false;
	}
	}
-	unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
-		audio_codec_ctx.get(), avcodec_close);
+	// Same as for the video codec context above: cleanup is presumably handled
+	// by the owning smart pointer now — TODO confirm.
	internal_rewind();
}
audio_format->num_channels = 2;
- int64_t channel_layout = audio_avframe->channel_layout;
- if (channel_layout == 0) {
- channel_layout = av_get_default_channel_layout(audio_avframe->channels);
+ AVChannelLayout channel_layout = audio_avframe->ch_layout;
+ if (!av_channel_layout_check(&channel_layout) ||
+ channel_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
+ av_channel_layout_default(&channel_layout, audio_avframe->ch_layout.nb_channels);
}
if (resampler == nullptr ||
audio_avframe->format != last_src_format ||
dst_format != last_dst_format ||
- channel_layout != last_channel_layout ||
+ av_channel_layout_compare(&channel_layout, &last_channel_layout) != 0||
audio_avframe->sample_rate != last_sample_rate) {
+ // TODO: When we get C++20, use AV_CHANNEL_LAYOUT_STEREO_DOWNMIX.
+ AVChannelLayout stereo_downmix;
+ stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
+ stereo_downmix.nb_channels = 2;
+ stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;
+
swr_free(&resampler);
- resampler = swr_alloc_set_opts(nullptr,
- /*out_ch_layout=*/AV_CH_LAYOUT_STEREO_DOWNMIX,
- /*out_sample_fmt=*/dst_format,
- /*out_sample_rate=*/OUTPUT_FREQUENCY,
- /*in_ch_layout=*/channel_layout,
- /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
- /*in_sample_rate=*/audio_avframe->sample_rate,
- /*log_offset=*/0,
- /*log_ctx=*/nullptr);
-
- if (resampler == nullptr) {
+ resampler = nullptr;
+ int err = swr_alloc_set_opts2(&resampler,
+ /*out_ch_layout=*/&stereo_downmix,
+ /*out_sample_fmt=*/dst_format,
+ /*out_sample_rate=*/OUTPUT_FREQUENCY,
+ /*in_ch_layout=*/&channel_layout,
+ /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
+ /*in_sample_rate=*/audio_avframe->sample_rate,
+ /*log_offset=*/0,
+ /*log_ctx=*/nullptr);
+
+ if (err != 0 || resampler == nullptr) {
fprintf(stderr, "Allocating resampler failed.\n");
abort();
}
int out_samples = swr_convert(resampler, &data, num_samples_room,
const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
if (out_samples < 0) {
- fprintf(stderr, "Audio conversion failed.\n");
- abort();
- }
+ fprintf(stderr, "Audio conversion failed.\n");
+ abort();
+ }
audio_frame->len += out_samples * bytes_per_sample;
}
{
	*error = false;
-	UniqueFrame video_frame(video_frame_allocator->alloc_frame());
+	// Ask the allocator for a frame sized to this specific decoded frame
+	// (width × height, stride = width) rather than a fixed-size buffer.
+	UniqueFrame video_frame(video_frame_allocator->create_frame(frame->width, frame->height, frame->width));
	if (video_frame->data == nullptr) {
		return video_frame;
	}
	// FIXME: Currently, if the video is too high-res for one of the allocated
	// frames, we simply refuse to scale it here to avoid crashes. It would be better
	// if we could somehow signal getting larger frames, especially as 4K is a thing now.
-	if (video_frame->len > FRAME_SIZE) {
-		fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+	// The limit is now per-frame (the size the allocator actually gave us)
+	// instead of the global FRAME_SIZE constant.
+	if (video_frame->len > video_frame->size) {
+		fprintf(stderr, "%s: Decoded frame would be larger than supported frame size (%zu > %zu), not decoding.\n", pathname.c_str(), video_frame->len, video_frame->size);
		*error = true;
		return video_frame;
	}
}